diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 16784b30d8..a0f1f3dc22 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,7 +7,6 @@ repos: - id: check-docstring-first - id: check-json - id: check-yaml - - id: double-quote-string-fixer - id: debug-statements - id: mixed-line-ending @@ -24,12 +23,8 @@ repos: - id: black - id: black-jupyter - - repo: https://github.com/PyCQA/flake8 - rev: 6.0.0 + - repo: https://github.com/charliermarsh/ruff-pre-commit + rev: 'v0.0.239' hooks: - - id: flake8 - - - repo: https://github.com/PyCQA/isort - rev: 5.12.0 - hooks: - - id: isort + - id: ruff + args: [ "--fix" ] diff --git a/act/corrections/mpl.py b/act/corrections/mpl.py index 73e2616df1..e0601817f3 100644 --- a/act/corrections/mpl.py +++ b/act/corrections/mpl.py @@ -141,8 +141,8 @@ def correct_mpl( x_data = x_data - x_ap # R-Squared Correction - co_data = co_data * height ** 2 - x_data = x_data * height ** 2 + co_data = co_data * height**2 + x_data = x_data * height**2 # Overlap Correction for j in range(ds[range_bins_var_name].size): diff --git a/act/discovery/__init__.py b/act/discovery/__init__.py index c0a5fab10d..2201e1d841 100644 --- a/act/discovery/__init__.py +++ b/act/discovery/__init__.py @@ -16,6 +16,6 @@ 'cropscape': ['get_crop_type'], 'noaapsl': ['download_noaa_psl_data'], 'neon': ['get_neon_site_products', 'get_neon_product_avail', 'download_neon_data'], - 'surfrad': ['download_surfrad_data'] + 'surfrad': ['download_surfrad_data'], }, ) diff --git a/act/discovery/airnow.py b/act/discovery/airnow.py index 7b458e780c..7ce48cea3d 100644 --- a/act/discovery/airnow.py +++ b/act/discovery/airnow.py @@ -38,7 +38,7 @@ def get_airnow_forecast(token, date, zipcode=None, latlon=None, distance=25): """ # default beginning of the query url - query_url = ('https://airnowapi.org/aq/forecast/') + query_url = 'https://airnowapi.org/aq/forecast/' # checking is either a zipcode or latlon coordinate is defined # if neither is defined then error is raised @@ -46,17 +46,34 @@ def get_airnow_forecast(token, date, zipcode=None, latlon=None, distance=25): raise NameError("Zipcode or latlon must be defined") if zipcode: - url = (query_url + ('zipcode/?' + 'format=text/csv' + '&zipCode=' - + str(zipcode) + '&date=' + str(date) - + '&distance=' + str(distance) - + '&API_KEY=' + str(token))) + url = query_url + ( + 'zipcode/?' + + 'format=text/csv' + + '&zipCode=' + + str(zipcode) + + '&date=' + + str(date) + + '&distance=' + + str(distance) + + '&API_KEY=' + + str(token) + ) if latlon: - url = (query_url + ('latLong/?' + 'format=text/csv' - + '&latitude=' + str(latlon[0]) + '&longitude=' - + str(latlon[1]) + '&date=' + str(date) - + '&distance=' + str(distance) - + '&API_KEY=' + str(token))) + url = query_url + ( + 'latLong/?' 
+ + 'format=text/csv' + + '&latitude=' + + str(latlon[0]) + + '&longitude=' + + str(latlon[1]) + + '&date=' + + str(date) + + '&distance=' + + str(distance) + + '&API_KEY=' + + str(token) + ) df = pd.read_csv(url) @@ -103,7 +120,7 @@ def get_airnow_obs(token, date=None, zipcode=None, latlon=None, distance=25): """ # default beginning of the query url - query_url = ('https://www.airnowapi.org/aq/observation/') + query_url = 'https://www.airnowapi.org/aq/observation/' # checking is either a zipcode or latlon coordinate is defined # if neither is defined then error is raised @@ -114,26 +131,67 @@ def get_airnow_obs(token, date=None, zipcode=None, latlon=None, distance=25): if date is None: obs_type = 'current' if zipcode: - url = (query_url + ('zipCode/' + str(obs_type) + '/?' + 'format=text/csv' - + '&zipCode=' + str(zipcode) + '&distance=' + str(distance) - + '&API_KEY=' + str(token))) + url = query_url + ( + 'zipCode/' + + str(obs_type) + + '/?' + + 'format=text/csv' + + '&zipCode=' + + str(zipcode) + + '&distance=' + + str(distance) + + '&API_KEY=' + + str(token) + ) if latlon: - url = (query_url + ('latLong/' + str(obs_type) + '/?' + 'format=text/csv' - + '&latitude=' + str(latlon[0]) - + '&longitude=' + str(latlon[1]) + '&distance=' - + str(distance) + '&API_KEY=' + str(token))) + url = query_url + ( + 'latLong/' + + str(obs_type) + + '/?' + + 'format=text/csv' + + '&latitude=' + + str(latlon[0]) + + '&longitude=' + + str(latlon[1]) + + '&distance=' + + str(distance) + + '&API_KEY=' + + str(token) + ) else: obs_type = 'historical' if zipcode: - url = (query_url + ('zipCode/' + str(obs_type) + '/?' + 'format=text/csv' - + '&zipCode=' + str(zipcode) + '&date=' + str(date) - + 'T00-0000&distance=' + str(distance) + '&API_KEY=' + str(token))) + url = query_url + ( + 'zipCode/' + + str(obs_type) + + '/?' + + 'format=text/csv' + + '&zipCode=' + + str(zipcode) + + '&date=' + + str(date) + + 'T00-0000&distance=' + + str(distance) + + '&API_KEY=' + + str(token) + ) if latlon: - url = (query_url + ('latLong/' + str(obs_type) + '/?' + 'format=text/csv' - + '&latitude=' + str(latlon[0]) - + '&longitude=' + str(latlon[1]) + '&date=' - + str(date) + 'T00-0000&distance=' + str(distance) - + '&API_KEY=' + str(token))) + url = query_url + ( + 'latLong/' + + str(obs_type) + + '/?' 
+ + 'format=text/csv' + + '&latitude=' + + str(latlon[0]) + + '&longitude=' + + str(latlon[1]) + + '&date=' + + str(date) + + 'T00-0000&distance=' + + str(distance) + + '&API_KEY=' + + str(token) + ) df = pd.read_csv(url) @@ -143,8 +201,9 @@ def get_airnow_obs(token, date=None, zipcode=None, latlon=None, distance=25): return ds -def get_airnow_bounded_obs(token, start_date, end_date, latlon_bnds, parameters='OZONE,PM25', data_type='B', - mon_type=0): +def get_airnow_bounded_obs( + token, start_date, end_date, latlon_bnds, parameters='OZONE,PM25', data_type='B', mon_type=0 +): """ Get AQI values or data concentrations for a specific date and time range and set of parameters within a geographic area of intrest @@ -184,16 +243,44 @@ def get_airnow_bounded_obs(token, start_date, end_date, latlon_bnds, parameters= verbose = 1 inc_raw_con = 1 - url = ('https://www.airnowapi.org/aq/data/?startDate=' + str(start_date) - + '&endDate=' + str(end_date) + '¶meters=' + str(parameters) - + '&BBOX=' + str(latlon_bnds) + '&dataType=' + str(data_type) - + '&format=text/csv' + '&verbose=' + str(verbose) - + '&monitorType=' + str(mon_type) + '&includerawconcentrations=' - + str(inc_raw_con) + '&API_KEY=' + str(token)) + url = ( + 'https://www.airnowapi.org/aq/data/?startDate=' + + str(start_date) + + '&endDate=' + + str(end_date) + + '¶meters=' + + str(parameters) + + '&BBOX=' + + str(latlon_bnds) + + '&dataType=' + + str(data_type) + + '&format=text/csv' + + '&verbose=' + + str(verbose) + + '&monitorType=' + + str(mon_type) + + '&includerawconcentrations=' + + str(inc_raw_con) + + '&API_KEY=' + + str(token) + ) # Set Column names - names = ['latitude', 'longitude', 'time', 'parameter', 'concentration', 'unit', - 'raw_concentration', 'AQI', 'category', 'site_name', 'site_agency', 'aqs_id', 'full_aqs_id'] + names = [ + 'latitude', + 'longitude', + 'time', + 'parameter', + 'concentration', + 'unit', + 'raw_concentration', + 'AQI', + 'category', + 'site_name', + 'site_agency', + 'aqs_id', + 'full_aqs_id', + ] # Read data into CSV df = pd.read_csv(url, names=names) @@ -211,12 +298,9 @@ def get_airnow_bounded_obs(token, start_date, end_date, latlon_bnds, parameters= data_vars={ 'latitude': (['sites'], latitude), 'longitude': (['sites'], longitude), - 'aqs_id': (['sites'], aqs_id) + 'aqs_id': (['sites'], aqs_id), }, - coords={ - 'time': (['time'], times), - 'sites': (['sites'], sites) - } + coords={'time': (['time'], times), 'sites': (['sites'], sites)}, ) # Set up emtpy data with nans @@ -233,7 +317,11 @@ def get_airnow_bounded_obs(token, start_date, end_date, latlon_bnds, parameters= data[v, t, s] = list(result[variables[v]])[0] atts = {'units': ''} else: - result = df.loc[(df['time'] == times[t]) & (df['site_name'] == sites[s]) & (df['parameter'] == variables[v])] + result = df.loc[ + (df['time'] == times[t]) + & (df['site_name'] == sites[s]) + & (df['parameter'] == variables[v]) + ] if len(result['concentration']) > 0: data[v, t, s] = list(result['concentration'])[0] atts = {'units': list(result['unit'])[0]} diff --git a/act/discovery/arm.py b/act/discovery/arm.py index ab831771b0..1fda756913 100644 --- a/act/discovery/arm.py +++ b/act/discovery/arm.py @@ -3,14 +3,11 @@ """ -import argparse import json import os -import sys from datetime import timedelta import requests import textwrap -import warnings try: from urllib.request import urlopen @@ -163,7 +160,9 @@ def download_arm_data(username, token, datastream, startdate, enddate, time=None open_bytes_file.write(data) file_names.append(output_file) # Get ARM 
DOI and print it out - doi = get_arm_doi(datastream, start_datetime.strftime('%Y-%m-%d'), end_datetime.strftime('%Y-%m-%d')) + doi = get_arm_doi( + datastream, start_datetime.strftime('%Y-%m-%d'), end_datetime.strftime('%Y-%m-%d') + ) print('\nIf you use these data to prepare a publication, please cite:\n') print(textwrap.fill(doi, width=80)) print('') @@ -197,7 +196,11 @@ def get_arm_doi(datastream, startdate, enddate): """ # Get the DOI information - doi_url = 'https://adc.arm.gov/citationservice/citation/datastream?id=' + datastream + '&citationType=apa' + doi_url = ( + 'https://adc.arm.gov/citationservice/citation/datastream?id=' + + datastream + + '&citationType=apa' + ) doi_url += '&startDate=' + startdate doi_url += '&endDate=' + enddate try: diff --git a/act/discovery/asos.py b/act/discovery/asos.py index 5f5ae27534..0d37e18d8b 100644 --- a/act/discovery/asos.py +++ b/act/discovery/asos.py @@ -10,8 +10,7 @@ import numpy as np import pandas as pd -import xarray as xr -from six import StringIO +from io import StringIO try: from urllib.request import urlopen diff --git a/act/discovery/neon.py b/act/discovery/neon.py index cfe3eff2c1..13251bd888 100644 --- a/act/discovery/neon.py +++ b/act/discovery/neon.py @@ -7,7 +7,6 @@ """ -import json import requests import os import shutil diff --git a/act/discovery/noaapsl.py b/act/discovery/noaapsl.py index 30b55ff2c2..3239731bf4 100644 --- a/act/discovery/noaapsl.py +++ b/act/discovery/noaapsl.py @@ -2,10 +2,8 @@ Function for downloading data from NOAA PSL Profiler Network """ -import json from datetime import datetime import pandas as pd -import numpy as np import os try: @@ -14,8 +12,9 @@ from urllib import urlopen -def download_noaa_psl_data(site=None, instrument=None, startdate=None, enddate=None, - hour=None, output=None): +def download_noaa_psl_data( + site=None, instrument=None, startdate=None, enddate=None, hour=None, output=None +): """ Function to download data from the NOAA PSL Profiler Network Data Library https://psl.noaa.gov/data/obs/datadisplay/ @@ -76,9 +75,18 @@ def download_noaa_psl_data(site=None, instrument=None, startdate=None, enddate=N url = 'https://downloads.psl.noaa.gov/psd2/data/realtime/' # Set list of strings that all point to the surface meteorology dataset - met_ds = ['Pressure', 'Datalogger', 'Net Radiation', 'Temp/RH', - 'Solar Radiation', 'Tipping Bucket', 'TBRG', 'Wind Speed', - 'Wind Direction', 'Wind Speed and Direction'] + met_ds = [ + 'Pressure', + 'Datalogger', + 'Net Radiation', + 'Temp/RH', + 'Solar Radiation', + 'Tipping Bucket', + 'TBRG', + 'Wind Speed', + 'Wind Direction', + 'Wind Speed and Direction', + ] # Add to the url depending on which instrument is requested if 'Parsivel' in instrument: @@ -153,8 +161,9 @@ def download_noaa_psl_data(site=None, instrument=None, startdate=None, enddate=N # Write each file out to a file with same name as online for f in files: if hour is not None: - if (str(doy).zfill(3) + str(hour)) not in f and\ - (str(doy).zfill(3) + '.' + str(hour)) not in f: + if (str(doy).zfill(3) + str(hour)) not in f and ( + str(doy).zfill(3) + '.' 
+ str(hour) + ) not in f: continue output_file = os.path.join(output_dir, f) try: diff --git a/act/discovery/surfrad.py b/act/discovery/surfrad.py index c6ba6fd356..23f5e3e05a 100644 --- a/act/discovery/surfrad.py +++ b/act/discovery/surfrad.py @@ -3,13 +3,8 @@ NOAA Surface Radiation Budget network """ -import json from datetime import datetime -import pandas as pd -import numpy as np import os -import re -import requests try: from urllib.request import urlopen diff --git a/act/io/__init__.py b/act/io/__init__.py index 6549c83612..4638ce4bac 100644 --- a/act/io/__init__.py +++ b/act/io/__init__.py @@ -39,6 +39,6 @@ ], 'pysp2': ['read_hk_file', 'read_sp2', 'read_sp2_dat'], 'sodar': ['read_mfas_sodar'], - 'hysplit': ['read_hysplit'] + 'hysplit': ['read_hysplit'], }, ) diff --git a/act/io/hysplit.py b/act/io/hysplit.py index 5c0c2fd6aa..35fc96710a 100644 --- a/act/io/hysplit.py +++ b/act/io/hysplit.py @@ -1,10 +1,8 @@ -import os import xarray as xr import numpy as np import pandas as pd from datetime import datetime -from .text import read_csv def read_hysplit(filename, base_year=2000): @@ -26,7 +24,7 @@ def read_hysplit(filename, base_year=2000): ds = xr.Dataset({}) num_lines = 0 - with open(filename, 'r') as filebuf: + with open(filename) as filebuf: num_grids = int(filebuf.readline().split()[0]) num_lines += 1 grid_times = [] @@ -37,7 +35,8 @@ def read_hysplit(filename, base_year=2000): num_lines += 1 grid_names.append(data[0]) grid_times.append( - datetime(year=int(data[1]), month=int(data[2]), day=int(data[3]), hour=int(data[4]))) + datetime(year=int(data[1]), month=int(data[2]), day=int(data[3]), hour=int(data[4])) + ) forecast_hours[i] = int(data[5]) ds["grid_forecast_hour"] = xr.DataArray(forecast_hours, dims=["num_grids"]) ds["grid_forecast_hour"].attrs["standard_name"] = "Grid forecast hour" @@ -56,8 +55,13 @@ def read_hysplit(filename, base_year=2000): data = filebuf.readline().split() num_lines += 1 traj_times.append( - datetime(year=(base_year + int(data[0])), month=int(data[1]), - day=int(data[2]), hour=int(data[3]))) + datetime( + year=(base_year + int(data[0])), + month=int(data[1]), + day=int(data[2]), + hour=int(data[3]), + ) + ) start_lats[i] = float(data[4]) start_lons[i] = float(data[5]) start_alt[i] = float(data[6]) @@ -73,15 +77,29 @@ def read_hysplit(filename, base_year=2000): ds["start_altitude"].attrs["units"] = "degree" data = filebuf.readline().split() num_lines += 1 - var_list = ["trajectory_number", "grid_number", "year", "month", "day", - "hour", "minute", "forecast_hour", "age", "lat", "lon", "alt"] + var_list = [ + "trajectory_number", + "grid_number", + "year", + "month", + "day", + "hour", + "minute", + "forecast_hour", + "age", + "lat", + "lon", + "alt", + ] for variable in data[1:]: var_list.append(variable) + input_df = pd.read_csv( filename, sep='\s+', index_col=False, names=var_list, skiprows=12) # noqa W605 input_df['year'] = base_year + input_df['year'] - input_df['time'] = pd.to_datetime(input_df[["year", "month", "day", "hour", "minute"]], - format='%y%m%d%H%M') + input_df['time'] = pd.to_datetime( + input_df[["year", "month", "day", "hour", "minute"]], format='%y%m%d%H%M' + ) input_df = input_df.set_index("time") del input_df["year"] del input_df["month"] diff --git a/act/io/icartt.py b/act/io/icartt.py index 2941d29186..42990b7050 100644 --- a/act/io/icartt.py +++ b/act/io/icartt.py @@ -7,11 +7,11 @@ - https://www.earthdata.nasa.gov/s3fs-public/imported/ESDS-RFC-029v2.pdf """ -import numpy as np import xarray as xr try: import icartt + 
_ICARTT_AVAILABLE = True _format = icartt.Formats.FFI1001 except ImportError: @@ -19,8 +19,7 @@ _format = None -def read_icartt(filename, format=_format, - return_None=False, **kwargs): +def read_icartt(filename, format=_format, return_None=False, **kwargs): """ Returns `xarray.Dataset` with stored data and metadata from a user-defined @@ -56,8 +55,7 @@ def read_icartt(filename, format=_format, """ if not _ICARTT_AVAILABLE: - raise ImportError( - "ICARTT is required to use to read ICARTT files but is not installed") + raise ImportError("ICARTT is required to use to read ICARTT files but is not installed") ds = None @@ -78,8 +76,7 @@ def read_icartt(filename, format=_format, return None # If requested return None for File not found error - if (type(exception).__name__ == 'OSError' - and exception.args[0] == 'no files to open'): + if type(exception).__name__ == 'OSError' and exception.args[0] == 'no files to open': return None # Define the Uncertainty for each variable. Note it may not be calculated. @@ -106,9 +103,7 @@ def read_icartt(filename, format=_format, key2 = 'quality_flag' else: key2 = key - da = xr.DataArray(ict.data[key], - coords=dict(time=ict.times), - name=key2, dims=['time']) + da = xr.DataArray(ict.data[key], coords=dict(time=ict.times), name=key2, dims=['time']) # Assume if Uncertainity does not match the number of variables, # values were not set within the file. Needs to be string! if len(uncertainty) != len(ict.variables): diff --git a/act/io/neon.py b/act/io/neon.py index 86d9bbd3eb..284bae0353 100644 --- a/act/io/neon.py +++ b/act/io/neon.py @@ -2,9 +2,7 @@ Modules for reading in NOAA PSL data. """ -import datetime as dt -import numpy as np import pandas as pd import xarray as xr diff --git a/act/io/sodar.py b/act/io/sodar.py index 15dc238f45..ec5c83f70c 100644 --- a/act/io/sodar.py +++ b/act/io/sodar.py @@ -74,11 +74,9 @@ def read_mfas_sodar(filepath): # Parse data to a dataframe skipping rows that aren't data. # tmp_columns is used to removed '#' column that causes # columns to move over by one. - df = pd.read_table(filepath, - sep=r'\s+', - skiprows=skip_full_ind, - names=tmp_columns, - usecols=columns) + df = pd.read_table( + filepath, sep=r'\s+', skiprows=skip_full_ind, names=tmp_columns, usecols=columns + ) df = df[~df['W'].isin(['dir'])].reset_index(drop=True) @@ -158,7 +156,7 @@ def _metadata_retrieval(lines): file_type_ind = np.argwhere(line_array == '# file type')[0][0] # Index the section of file information. - file_def = line_array[file_info_ind + 2:file_type_ind - 1] + file_def = line_array[file_info_ind + 2 : file_type_ind - 1] # Create a dictionary of file information to be plugged in later to the xarray # dataset attributes. @@ -179,7 +177,7 @@ def _metadata_retrieval(lines): data_ind = np.argwhere(line_array == '# beginning of data block')[0][0] # Index the section of variable information. - variable_def = line_array[variable_info_ind + 2 :data_ind - 1] + variable_def = line_array[variable_info_ind + 2 : data_ind - 1] # Create a dictionary of variable information to be plugged in later to the xarray # variable attributes. 
Skipping error code as it does not have metadata similar to diff --git a/act/plotting/__init__.py b/act/plotting/__init__.py index 68547a9a3c..814fa43f0a 100644 --- a/act/plotting/__init__.py +++ b/act/plotting/__init__.py @@ -45,6 +45,6 @@ 'timeseriesdisplay': ['TimeSeriesDisplay'], 'windrosedisplay': ['WindRoseDisplay'], 'xsectiondisplay': ['XSectionDisplay'], - 'distributiondisplay' : ['DistributionDisplay'], + 'distributiondisplay': ['DistributionDisplay'], }, ) diff --git a/act/plotting/distributiondisplay.py b/act/plotting/distributiondisplay.py index 0dec535c43..dd8cee55e5 100644 --- a/act/plotting/distributiondisplay.py +++ b/act/plotting/distributiondisplay.py @@ -184,7 +184,8 @@ def plot_stacked_bar( ydata.values.flatten(), density=density, bins=[bins, sortby_bins], - **hist_kwargs) + **hist_kwargs, + ) x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0 self.axes[subplot_index].bar( x_inds, @@ -202,8 +203,9 @@ def plot_stacked_bar( ) self.axes[subplot_index].legend() else: - my_hist, bins = np.histogram(xdata.values.flatten(), bins=bins, - density=density, **hist_kwargs) + my_hist, bins = np.histogram( + xdata.values.flatten(), bins=bins, density=density, **hist_kwargs + ) x_inds = (bins[:-1] + bins[1:]) / 2.0 self.axes[subplot_index].bar(x_inds, my_hist) @@ -322,7 +324,9 @@ def plot_size_distribution( ) if time is not None: t = pd.Timestamp(time) - set_title += ''.join([' at ', ':'.join([str(t.hour), str(t.minute), str(t.second)])]) + set_title += ''.join( + [' at ', ':'.join([str(t.hour), str(t.minute), str(t.second)])] + ) self.axes[subplot_index].set_title(set_title) self.axes[subplot_index].step(bins.values, xdata.values, **kwargs) self.axes[subplot_index].set_xlabel(xtitle) @@ -425,7 +429,7 @@ def plot_stairstep( ydata.values.flatten(), density=density, bins=[bins, sortby_bins], - **hist_kwargs + **hist_kwargs, ) x_inds = (x_bins[:-1] + x_bins[1:]) / 2.0 self.axes[subplot_index].step( @@ -443,8 +447,9 @@ def plot_stairstep( ) self.axes[subplot_index].legend() else: - my_hist, bins = np.histogram(xdata.values.flatten(), bins=bins, - density=density, **hist_kwargs) + my_hist, bins = np.histogram( + xdata.values.flatten(), bins=bins, density=density, **hist_kwargs + ) x_inds = (bins[:-1] + bins[1:]) / 2.0 self.axes[subplot_index].step(x_inds, my_hist, **kwargs) @@ -575,15 +580,15 @@ def plot_heatmap( if x_bins is None: my_hist, x_bins, y_bins = np.histogram2d( - xdata.values.flatten(), ydata.values.flatten(), density=density, - **hist_kwargs) + xdata.values.flatten(), ydata.values.flatten(), density=density, **hist_kwargs + ) else: my_hist, x_bins, y_bins = np.histogram2d( xdata.values.flatten(), ydata.values.flatten(), density=density, bins=[x_bins, y_bins], - **hist_kwargs + **hist_kwargs, ) # Adding in the ability to threshold the heatmaps if threshold is not None: @@ -616,7 +621,7 @@ def plot_heatmap( return return_dict - def set_ratio_line(self, subplot_index=(0, )): + def set_ratio_line(self, subplot_index=(0,)): """ Sets the 1:1 ratio line. @@ -633,16 +638,17 @@ def set_ratio_line(self, subplot_index=(0, )): ratio = np.linspace(xlims, xlims[-1]) self.axes[subplot_index].plot(ratio, ratio, 'k--') - def plot_scatter(self, - x_field, - y_field, - m_field=None, - dsname=None, - cbar_label=None, - set_title=None, - subplot_index=(0,), - **kwargs, - ): + def plot_scatter( + self, + x_field, + y_field, + m_field=None, + dsname=None, + cbar_label=None, + set_title=None, + subplot_index=(0,), + **kwargs, + ): """ This procedure will produce a scatter plot from 2 variables. 
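The reformatted plot_scatter signature in the hunk above doubles as the call pattern. A minimal usage sketch against a synthetic dataset — the variable names, units, and data here are illustrative stand-ins, not from this repository:

import numpy as np
import xarray as xr
import act

# Synthetic dataset with two comparable variables on a shared time axis.
time = np.datetime64('2023-01-01') + np.arange(100) * np.timedelta64(1, 'm')
ds = xr.Dataset(
    {
        'temp_mean': ('time', np.random.normal(20, 2, 100)),
        'rh_mean': ('time', np.random.normal(60, 5, 100)),
    },
    coords={'time': time},
)
ds['temp_mean'].attrs['units'] = 'degC'
ds['rh_mean'].attrs['units'] = '%'

# plot_scatter and set_ratio_line as reformatted in the hunks above.
display = act.plotting.DistributionDisplay(ds)
display.plot_scatter('temp_mean', 'rh_mean')
display.set_ratio_line()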
@@ -745,18 +751,19 @@ def plot_scatter(self, return self.axes[subplot_index] - def plot_violin(self, - field, - positions=None, - dsname=None, - vert=True, - showmeans=True, - showmedians=True, - showextrema=True, - subplot_index=(0,), - set_title=None, - **kwargs, - ): + def plot_violin( + self, + field, + positions=None, + dsname=None, + vert=True, + showmeans=True, + showmedians=True, + showextrema=True, + subplot_index=(0,), + set_title=None, + **kwargs, + ): """ This procedure will produce a violin plot for the selected field (or fields). @@ -819,14 +826,15 @@ def plot_violin(self, axtitle = field # Display the scatter plot, pass keyword args for unspecified attributes - scc = self.axes[subplot_index].violinplot(ndata, - positions=positions, - vert=vert, - showmeans=showmeans, - showmedians=showmedians, - showextrema=showextrema, - **kwargs - ) + scc = self.axes[subplot_index].violinplot( + ndata, + positions=positions, + vert=vert, + showmeans=showmeans, + showmedians=showmedians, + showextrema=showextrema, + **kwargs, + ) if showmeans is True: scc['cmeans'].set_edgecolor('red') scc['cmeans'].set_label('mean') diff --git a/act/plotting/geodisplay.py b/act/plotting/geodisplay.py index b01d425b43..451bbe1826 100644 --- a/act/plotting/geodisplay.py +++ b/act/plotting/geodisplay.py @@ -3,7 +3,6 @@ """ -import warnings import matplotlib import matplotlib.pyplot as plt diff --git a/act/plotting/plot.py b/act/plotting/plot.py index 0d5a31eca1..9dfc15c5de 100644 --- a/act/plotting/plot.py +++ b/act/plotting/plot.py @@ -71,8 +71,7 @@ class with this set to None will create a new figure handle. See the """ - def __init__(self, ds, subplot_shape=(1,), ds_name=None, subplot_kw=None, - **kwargs): + def __init__(self, ds, subplot_shape=(1,), ds_name=None, subplot_kw=None, **kwargs): if isinstance(ds, xr.Dataset): if 'datastream' in ds.attrs.keys() is not None: self._ds = {ds.attrs['datastream']: ds} @@ -122,8 +121,7 @@ def __init__(self, ds, subplot_shape=(1,), ds_name=None, subplot_kw=None, if subplot_shape is not None: self.add_subplots(subplot_shape, subplot_kw=subplot_kw, **kwargs) - def add_subplots(self, subplot_shape=(1,), secondary_y=False, subplot_kw=None, - **kwargs): + def add_subplots(self, subplot_shape=(1,), secondary_y=False, subplot_kw=None, **kwargs): """ Adds subplots to the Display object. The current figure in the object will be deleted and overwritten. @@ -235,8 +233,9 @@ def assign_to_figure_axis(self, fig, ax): self.fig = fig self.axes = np.array([ax]) - def add_colorbar(self, mappable, title=None, subplot_index=(0,), pad=None, - width=None, **kwargs): + def add_colorbar( + self, mappable, title=None, subplot_index=(0,), pad=None, width=None, **kwargs + ): """ Adds a colorbar to the plot. 
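The collapsed add_colorbar signature above accepts any matplotlib mappable. A short sketch, assuming a plain xarray Dataset is acceptable to the Display constructor (names and values are synthetic); Display keeps its axes in a numpy array (see assign_to_figure_axis in this file), so a mappable drawn on them can be handed straight to add_colorbar:

import numpy as np
import xarray as xr
import act

# Synthetic 2D field on time x height; names are illustrative.
time = np.datetime64('2023-01-01') + np.arange(24) * np.timedelta64(1, 'h')
height = np.arange(10)
ds = xr.Dataset(
    {'backscatter': (('time', 'height'), np.random.rand(24, 10))},
    coords={'time': time, 'height': height},
)

display = act.plotting.TimeSeriesDisplay(ds)
# pcolormesh wants C shaped (len(y), len(x)), hence the transpose.
mesh = display.axes[0].pcolormesh(time, height, ds['backscatter'].values.T)
display.add_colorbar(mesh, title='backscatter', subplot_index=(0,))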
@@ -301,7 +300,7 @@ def group_by(self, units): return DisplayGroupby(self, units) -class DisplayGroupby(object): +class DisplayGroupby: def __init__(self, display, units): """ @@ -348,8 +347,7 @@ def plot_group(self, func_name, dsname=None, **kwargs): func = getattr(self.display, func_name) if not callable(func): - raise RuntimeError("The specified string is not a function of " - "the Display object.") + raise RuntimeError("The specified string is not a function of " "the Display object.") subplot_shape = self.display.axes.shape i = 0 @@ -382,18 +380,25 @@ def plot_group(self, func_name, dsname=None, **kwargs): days_in_year = 365 year_diff = ds1.time.dt.year - first_year time_diff = np.array( - [np.timedelta64(x * days_in_year, 'D') for x in year_diff.values]) + [np.timedelta64(x * days_in_year, 'D') for x in year_diff.values] + ) ds1['time'] = ds1.time - time_diff self.display._ds[key + '%d_%d' % (k, yr)] = ds1 func(dsname=key + '%d_%d' % (k, yr), label=str(yr), **kwargs) self.mapping[key + '%d_%d' % (k, yr)] = subplot_index - self.xlims[key + '%d_%d' % (k, yr)] = (ds1.time.values.min(), ds1.time.values.max()) + self.xlims[key + '%d_%d' % (k, yr)] = ( + ds1.time.values.min(), + ds1.time.values.max(), + ) del self.display._ds[key + '_%d' % k] else: func(dsname=key + '_%d' % k, **kwargs) self.mapping[key + '_%d' % k] = subplot_index if self.isTimeSeriesDisplay: - self.xlims[key + '_%d' % k] = (ds.time.values.min(), ds.time.values.max()) + self.xlims[key + '_%d' % k] = ( + ds.time.values.min(), + ds.time.values.max(), + ) i = i + 1 if wrap_around is False and i < np.prod(subplot_shape): diff --git a/act/plotting/skewtdisplay.py b/act/plotting/skewtdisplay.py index 36190cf457..6fab52ce42 100644 --- a/act/plotting/skewtdisplay.py +++ b/act/plotting/skewtdisplay.py @@ -3,7 +3,6 @@ """ -import warnings from copy import deepcopy import matplotlib.pyplot as plt @@ -56,8 +55,7 @@ def __init__(self, ds, subplot_shape=(1,), subplot=None, ds_name=None, set_fig=N # We want to use our routine to handle subplot adding, not the main # one new_kwargs = kwargs.copy() - super().__init__(ds, None, ds_name, subplot_kw=dict(projection='skewx'), - **new_kwargs) + super().__init__(ds, None, ds_name, subplot_kw=dict(projection='skewx'), **new_kwargs) # Make a SkewT object for each subplot self.add_subplots(subplot_shape, set_fig=set_fig, subplot=subplot, **kwargs) @@ -354,7 +352,9 @@ def plot_from_u_and_v( if not all(p[i] <= p[i + 1] for i in range(len(p) - 1)): if 'time' in self._ds: self._ds[dsname][p_field] = ( - self._ds[dsname][p_field].rolling(time=smooth_p, min_periods=1, center=True).mean() + self._ds[dsname][p_field] + .rolling(time=smooth_p, min_periods=1, center=True) + .mean() ) p = self._ds[dsname][p_field] @@ -450,7 +450,9 @@ def plot_from_u_and_v( self.SkewT[subplot_index].plot_dry_adiabats(pressure=plp, t0=t0, **dry_adiabats_kwargs) if plot_moist_adiabats: - self.SkewT[subplot_index].plot_moist_adiabats(t0=t0, pressure=plp, **moist_adiabats_kwargs) + self.SkewT[subplot_index].plot_moist_adiabats( + t0=t0, pressure=plp, **moist_adiabats_kwargs + ) if plot_mixing_lines: self.SkewT[subplot_index].plot_mixing_lines(pressure=plp, **mixing_lines_kwargs) @@ -458,7 +460,7 @@ def plot_from_u_and_v( # Set Title if set_title is None: if 'time' in self._ds[dsname]: - title_time = dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]), + title_time = (dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),) elif '_file_dates' in self._ds[dsname].attrs: title_time = 
self._ds[dsname].attrs['_file_dates'][0] else: diff --git a/act/plotting/timeseriesdisplay.py b/act/plotting/timeseriesdisplay.py index 70aee57dce..f329d35171 100644 --- a/act/plotting/timeseriesdisplay.py +++ b/act/plotting/timeseriesdisplay.py @@ -151,7 +151,9 @@ def day_night_background(self, dsname=None, subplot_index=(0,)): for value, name in zip(lat_lon_list, ['Latitude', 'Longitude']): if not np.isfinite(value): - warnings.warn(f"{name} value in dataset equal to '{value}' is not finite. ", RuntimeWarning) + warnings.warn( + f"{name} value in dataset equal to '{value}' is not finite. ", RuntimeWarning + ) return lat = lat_lon_list[0] @@ -215,13 +217,17 @@ def set_xrng(self, xrng, subplot_index=(0, 0)): # This is to catch that and expand the range so we avoid the warning. if xrng[0] == xrng[1]: if isinstance(xrng[0], np.datetime64): - print(f'\nAttempting to set xlim range to single value {xrng[0]}. ' - 'Expanding range by 2 seconds.\n') + print( + f'\nAttempting to set xlim range to single value {xrng[0]}. ' + 'Expanding range by 2 seconds.\n' + ) xrng[0] -= np.timedelta64(1, 's') xrng[1] += np.timedelta64(1, 's') elif isinstance(xrng[0], dt.datetime): - print(f'\nAttempting to set xlim range to single value {xrng[0]}. ' - 'Expanding range by 2 seconds.\n') + print( + f'\nAttempting to set xlim range to single value {xrng[0]}. ' + 'Expanding range by 2 seconds.\n' + ) xrng[0] -= dt.timedelta(seconds=1) xrng[1] += dt.timedelta(seconds=1) self.axes[subplot_index].set_xlim(xrng) @@ -435,10 +441,22 @@ def plot( if cb_friendly: cmap = 'HomeyerRainbow' - assessment_overplot_category_color['Bad'] = (0.9285714285714286, 0.7130901016453677, 0.7130901016453677) - assessment_overplot_category_color['Incorrect'] = (0.9285714285714286, 0.7130901016453677, 0.7130901016453677) - assessment_overplot_category_color['Not Failing'] = (0.0, 0.4240129715562796, 0.4240129715562796), - assessment_overplot_category_color['Acceptable'] = (0.0, 0.4240129715562796, 0.4240129715562796), + assessment_overplot_category_color['Bad'] = ( + 0.9285714285714286, + 0.7130901016453677, + 0.7130901016453677, + ) + assessment_overplot_category_color['Incorrect'] = ( + 0.9285714285714286, + 0.7130901016453677, + 0.7130901016453677, + ) + assessment_overplot_category_color['Not Failing'] = ( + (0.0, 0.4240129715562796, 0.4240129715562796), + ) + assessment_overplot_category_color['Acceptable'] = ( + (0.0, 0.4240129715562796, 0.4240129715562796), + ) # Get data and dimensions data = self._ds[dsname][field] @@ -633,9 +651,7 @@ def plot( ] ) else: - date_result = search( - r'\d{4}-\d{1,2}-\d{1,2}', self._ds[dsname].time.attrs['units'] - ) + date_result = search(r'\d{4}-\d{1,2}-\d{1,2}', self._ds[dsname].time.attrs['units']) if date_result is not None: set_title = ' '.join([dsname, field, 'on', date_result.group(0)]) else: @@ -1183,9 +1199,7 @@ def plot_time_height_xsection_from_1d_data( ax = self.axes[subplot_index] - mesh = ax.pcolormesh( - x_times, y_levels, np.transpose(data), shading=set_shading, **kwargs - ) + mesh = ax.pcolormesh(x_times, y_levels, np.transpose(data), shading=set_shading, **kwargs) if day_night_background is True: self.day_night_background(subplot_index=subplot_index, dsname=dsname) @@ -1366,9 +1380,7 @@ def time_height_scatter( ] ) else: - date_result = search( - r'\d{4}-\d{1,2}-\d{1,2}', self._ds[dsname].time.attrs['units'] - ) + date_result = search(r'\d{4}-\d{1,2}-\d{1,2}', self._ds[dsname].time.attrs['units']) if date_result is not None: set_title = ' '.join([dsname, data_field, 'on', 
date_result.group(0)]) else: @@ -1646,7 +1658,6 @@ def qc_flag_block_plot( ) else: - test_nums = [] for ii, assess in enumerate(flag_assessments): if assess not in color_lookup: @@ -1664,9 +1675,7 @@ def qc_flag_block_plot( # Get test number from flag_mask bitpacked number test_nums.append(parse_bit(flag_masks[ii])) # Get masked array data to use mask for finding if/where test is set - data = self._ds[dsname].qcfilter.get_masked_data( - data_field, rm_tests=test_nums[-1] - ) + data = self._ds[dsname].qcfilter.get_masked_data(data_field, rm_tests=test_nums[-1]) if np.any(data.mask): # Get time ranges from time and masked data barh_list = reduce_time_ranges( diff --git a/act/plotting/windrosedisplay.py b/act/plotting/windrosedisplay.py index ef128c2ccc..fe99f41c62 100644 --- a/act/plotting/windrosedisplay.py +++ b/act/plotting/windrosedisplay.py @@ -36,8 +36,7 @@ class and has therefore has the same attributes as that class. """ def __init__(self, ds, subplot_shape=(1,), ds_name=None, **kwargs): - super().__init__(ds, subplot_shape, ds_name, subplot_kw=dict(projection='polar'), - **kwargs) + super().__init__(ds, subplot_shape, ds_name, subplot_kw=dict(projection='polar'), **kwargs) def set_thetarng(self, trng=(0.0, 360.0), subplot_index=(0,)): """ @@ -223,18 +222,14 @@ def plot( **kwargs, ) ) - ax.legend( - loc=legend_loc, bbox_to_anchor=legend_bbox, title=legend_title - ) + ax.legend(loc=legend_loc, bbox_to_anchor=legend_bbox, title=legend_title) ax.set_theta_zero_location('N') ax.set_theta_direction(-1) # Add an annulus with text stating % of time calm pct_calm = np.sum(spd_data <= calm_threshold) / len(spd_data) * 100 ax.set_rorigin(-2.5) - ax.annotate( - '%3.2f%%\n calm' % pct_calm, xy=(0, -2.5), ha='center', va='center' - ) + ax.annotate('%3.2f%%\n calm' % pct_calm, xy=(0, -2.5), ha='center', va='center') # Set the ticks to be nice numbers tick_max = tick_interval * round(np.nanmax(np.cumsum(wind_hist, axis=1)) / tick_interval) @@ -353,10 +348,10 @@ def plot_data( for i, d in enumerate(dir_bins_mid): if i < len(dir_bins_mid) - 1: idx = np.where((dir_data > d) & (dir_data <= dir_bins_mid[i + 1]))[0] - bins.append(d + (dir_bins_mid[i + 1] - d) / 2.) + bins.append(d + (dir_bins_mid[i + 1] - d) / 2.0) else: - idx = np.where((dir_data > d) & (dir_data <= 360.))[0] - bins.append(d + (360. - d) / 2.) 
+ idx = np.where((dir_data > d) & (dir_data <= 360.0))[0] + bins.append(d + (360.0 - d) / 2.0) if plot_type == 'line': if line_plot_calc == 'mean': @@ -398,8 +393,12 @@ def plot_data( ) hist = np.insert(hist, -1, hist[0], axis=0) cplot = self.axes[subplot_index].contourf( - np.deg2rad(xedges), yedges[0:-1], np.transpose(hist), - cmap=cmap, levels=clevels, **kwargs + np.deg2rad(xedges), + yedges[0:-1], + np.transpose(hist), + cmap=cmap, + levels=clevels, + **kwargs, ) plot_type_str = 'Heatmap of' cbar = self.fig.colorbar(cplot, ax=self.axes[subplot_index]) @@ -447,8 +446,13 @@ def plot_data( clevels = np.linspace(vmin, vmax, clevels) cplot = self.axes[subplot_index].contourf( - np.deg2rad(bins), spd_bins, np.transpose(mean_data), - cmap=cmap, levels=clevels, extend='both', **kwargs + np.deg2rad(bins), + spd_bins, + np.transpose(mean_data), + cmap=cmap, + levels=clevels, + extend='both', + **kwargs, ) plot_type_str = 'Mean of' cbar = self.fig.colorbar(cplot, ax=self.axes[subplot_index]) @@ -461,8 +465,8 @@ def plot_data( self.axes[subplot_index].set_theta_direction(-1) # Set Title - sdate = dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]), - edate = dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[-1]), + sdate = (dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[0]),) + edate = (dt_utils.numpy_to_arm_date(self._ds[dsname].time.values[-1]),) if sdate == edate: date_str = 'on ' + sdate[0] @@ -474,13 +478,7 @@ def plot_data( units = '' if set_title is None: set_title = ' '.join( - [ - plot_type_str, - data_field + ' (' + units + ')', - 'by\n', - dir_field, - date_str - ] + [plot_type_str, data_field + ' (' + units + ')', 'by\n', dir_field, date_str] ) self.axes[subplot_index].set_title(set_title) plt.tight_layout(h_pad=1.05) diff --git a/act/qc/add_supplemental_qc.py b/act/qc/add_supplemental_qc.py index 2d94f15a2d..21e463bc94 100644 --- a/act/qc/add_supplemental_qc.py +++ b/act/qc/add_supplemental_qc.py @@ -2,7 +2,6 @@ import numpy as np from pathlib import Path from dateutil import parser -from os import environ # Example of the YAML file and how to construct. # The times are set as inclusive start to inclusive end time. @@ -66,9 +65,8 @@ def read_yaml_supplemental_qc( datetime64=True, time_delim=(';', ',', '|', r'\t'), none_if_empty=True, - quiet=False + quiet=False, ): - """ Returns a dictionary converstion of YAML file for flagging data. The dictionary will contain variable names as first key, assessents as second keys containing @@ -136,7 +134,8 @@ def read_yaml_supplemental_qc( except KeyError: raise RuntimeError( 'Unable to determine datastream name from Dataset. Need to set global attribute ' - '_datastream in Dataset or provided full path to flag file.') + '_datastream in Dataset or provided full path to flag file.' + ) flag_file = list(Path(fullpath).glob(f'{datastream}.yml')) flag_file.extend(list(Path(fullpath).glob(f'{datastream}.yaml'))) @@ -164,7 +163,7 @@ def read_yaml_supplemental_qc( assessments = [ii.capitalize() for ii in assessments] # Read YAML file - with open(flag_file, "r") as fp: + with open(flag_file) as fp: try: data_dict = yaml.load(fp, Loader=yaml.FullLoader) except AttributeError: @@ -230,9 +229,8 @@ def apply_supplemental_qc( assessments=None, apply_all=True, exclude_all_variables=None, - quiet=False + quiet=False, ): - """ Apply flagging from supplemental QC file by adding new QC tests. 
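As context for the hunks that follow, a call sketch for the function whose docstring begins above. The dataset and YAML path are hypothetical; the flag-file layout follows the example in the module header shown earlier in this file's diff. Passing a full path to the flag file sidesteps the _datastream attribute lookup that read_yaml_supplemental_qc otherwise performs:

import numpy as np
import xarray as xr
from act.qc.add_supplemental_qc import apply_supplemental_qc

# Minimal in-memory dataset; variable name is a stand-in.
time = np.datetime64('2020-03-01') + np.arange(60) * np.timedelta64(1, 'm')
ds = xr.Dataset({'temp_mean': ('time', np.random.normal(5, 1, 60))}, coords={'time': time})

# Hypothetical full path to the YAML flag file.
apply_supplemental_qc(ds, '/path/to/sgpmetE13.b1.yaml', assessments=['Bad'], quiet=True)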
@@ -284,7 +282,8 @@ def apply_supplemental_qc( exclude_vars.extend(exclude_all_variables) flag_dict = read_yaml_supplemental_qc( - ds, fullpath, variables=variables, assessments=assessments, quiet=quiet) + ds, fullpath, variables=variables, assessments=assessments, quiet=quiet + ) if flag_dict is None: return @@ -301,7 +300,8 @@ def apply_supplemental_qc( indexes = np.array([], dtype=np.int32) for vals in times: ind = np.argwhere( - (ds['time'].values >= vals[0]) & (ds['time'].values <= vals[1])) + (ds['time'].values >= vals[0]) & (ds['time'].values <= vals[1]) + ) if len(ind) > 0: indexes = np.append(indexes, ind) @@ -311,7 +311,8 @@ def apply_supplemental_qc( var_name, index=indexes, test_meaning=description, - test_assessment=asses_name) + test_assessment=asses_name, + ) var_name = '_all' if apply_all and var_name in flag_dict.keys(): @@ -325,7 +326,8 @@ def apply_supplemental_qc( indexes = np.array([], dtype=np.int32) for vals in times: ind = np.argwhere( - (ds['time'].values >= vals[0]) & (ds['time'].values <= vals[1])) + (ds['time'].values >= vals[0]) & (ds['time'].values <= vals[1]) + ) if ind.size > 0: indexes = np.append(indexes, np.ndarray.flatten(ind)) @@ -347,4 +349,5 @@ def apply_supplemental_qc( all_var_name, index=indexes, test_meaning=description, - test_assessment=asses_name) + test_assessment=asses_name, + ) diff --git a/act/qc/arm.py b/act/qc/arm.py index 0fb84597d4..0096befe68 100644 --- a/act/qc/arm.py +++ b/act/qc/arm.py @@ -93,8 +93,10 @@ def add_dqr_to_qc( raise ValueError('Dataset does not have datastream attribute') if datastream == DEFAULT_DATASTREAM_NAME: - raise ValueError("'datastream' name required for DQR service set to default value " - f"{datastream}. Unable to perform DQR service query.") + raise ValueError( + "'datastream' name required for DQR service set to default value " + f"{datastream}. Unable to perform DQR service query." + ) # Clean up QC to conform to CF conventions if cleanup_qc: @@ -163,8 +165,10 @@ def add_dqr_to_qc( } if dqr_link: - print(f"{dqr_number} - {quality_category.lower().capitalize()}: " - f"https://adc.arm.gov/ArchiveServices/DQRService?dqrid={dqr_number}") + print( + f"{dqr_number} - {quality_category.lower().capitalize()}: " + f"https://adc.arm.gov/ArchiveServices/DQRService?dqrid={dqr_number}" + ) # Check to ensure variable is list if variable and not isinstance(variable, (list, tuple)): @@ -173,7 +177,6 @@ def add_dqr_to_qc( loc_vars = ['lat', 'lon', 'alt', 'latitude', 'longitude', 'altitude'] for key, value in dqr_results.items(): for var_name in value['variables']: - # Do not process on location variables if skip_location_vars and var_name in loc_vars: continue @@ -187,7 +190,8 @@ def add_dqr_to_qc( var_name, index=np.unique(value['index']), test_meaning=value['test_meaning'], - test_assessment=value['test_assessment']) + test_assessment=value['test_assessment'], + ) except KeyError: # Variable name not in Dataset continue diff --git a/act/qc/bsrn_tests.py b/act/qc/bsrn_tests.py index c585d01770..1e3c5e4ef4 100644 --- a/act/qc/bsrn_tests.py +++ b/act/qc/bsrn_tests.py @@ -45,11 +45,12 @@ def _calculate_solar_parameters(ds, lat_name, lon_name, solar_constant): # Calculate solar parameters elevation, _, solar_distance = get_solar_azimuth_elevation( - latitude=latitude, longitude=longitude, time=ds['time'].values) + latitude=latitude, longitude=longitude, time=ds['time'].values + ) solar_distance = np.nanmean(solar_distance) Sa = solar_constant / solar_distance**2 - sza = 90. 
- elevation + sza = 90.0 - elevation return (sza, Sa) @@ -117,9 +118,8 @@ def bsrn_limits_test( solar_constant=1366, lat_name='lat', lon_name='lon', - use_dask=False + use_dask=False, ): - """ Method to apply BSRN limits test and add results to ancillary quality control variable. Need to provide variable name for each measurement for the test to be performed. If no @@ -188,71 +188,88 @@ def bsrn_limits_test( test = test.lower() test_names = [ii.lower() for ii in test_names_org] if test not in test_names: - raise ValueError(f"Value of '{test}' in keyword 'test' not recognized. " - f"Must a single value in options {test_names_org}") + raise ValueError( + f"Value of '{test}' in keyword 'test' not recognized. " + f"Must a single value in options {test_names_org}" + ) sza, Sa = _calculate_solar_parameters(self._ds, lat_name, lon_name, solar_constant) if test == test_names[0]: if sw_min_limit is None: - sw_min_limit = -4. + sw_min_limit = -4.0 if lw_min_dn_limit is None: - lw_min_dn_limit = 40. + lw_min_dn_limit = 40.0 if lw_min_up_limit is None: - lw_min_up_limit = 40. + lw_min_up_limit = 40.0 if lw_max_dn_limit is None: - lw_max_dn_limit = 700. + lw_max_dn_limit = 700.0 if lw_max_up_limit is None: - lw_max_up_limit = 900. + lw_max_up_limit = 900.0 elif test == test_names[1]: if sw_min_limit is None: - sw_min_limit = -2. + sw_min_limit = -2.0 if lw_min_dn_limit is None: - lw_min_dn_limit = 60. + lw_min_dn_limit = 60.0 if lw_min_up_limit is None: - lw_min_up_limit = 60. + lw_min_up_limit = 60.0 if lw_max_dn_limit is None: - lw_max_dn_limit = 500. + lw_max_dn_limit = 500.0 if lw_max_up_limit is None: - lw_max_up_limit = 700. + lw_max_up_limit = 700.0 # Global Shortwave downwelling min and max tests if gbl_SW_dn_name is not None: cos_sza = np.cos(np.radians(sza)) - cos_sza[sza > 90.] = 0. + cos_sza[sza > 90.0] = 0.0 if test == test_names[0]: - sw_max_limit = Sa * 1.5 * cos_sza**1.2 + 100. + sw_max_limit = Sa * 1.5 * cos_sza**1.2 + 100.0 elif test == test_names[1]: - sw_max_limit = Sa * 1.2 * cos_sza**1.2 + 50. + sw_max_limit = Sa * 1.2 * cos_sza**1.2 + 50.0 - index_min, index_max = _find_indexes(self._ds, gbl_SW_dn_name, sw_min_limit, sw_max_limit, use_dask) + index_min, index_max = _find_indexes( + self._ds, gbl_SW_dn_name, sw_min_limit, sw_max_limit, use_dask + ) self._ds.qcfilter.add_test( - gbl_SW_dn_name, index=index_min, test_assessment='Bad', - test_meaning=f"Value less than BSRN {test.lower()} limit of {sw_min_limit} W/m^2") + gbl_SW_dn_name, + index=index_min, + test_assessment='Bad', + test_meaning=f"Value less than BSRN {test.lower()} limit of {sw_min_limit} W/m^2", + ) self._ds.qcfilter.add_test( - gbl_SW_dn_name, index=index_max, test_assessment='Bad', - test_meaning=f"Value greater than BSRN {test.lower()} limit") + gbl_SW_dn_name, + index=index_max, + test_assessment='Bad', + test_meaning=f"Value greater than BSRN {test.lower()} limit", + ) # Diffuse Shortwave downwelling min and max tests if glb_diffuse_SW_dn_name is not None: with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=RuntimeWarning) if test == test_names[0]: - sw_max_limit = Sa * 0.95 * np.cos(np.radians(sza))**1.2 + 50. + sw_max_limit = Sa * 0.95 * np.cos(np.radians(sza)) ** 1.2 + 50.0 elif test == test_names[1]: - sw_max_limit = Sa * 0.75 * np.cos(np.radians(sza))**1.2 + 30. 
+ sw_max_limit = Sa * 0.75 * np.cos(np.radians(sza)) ** 1.2 + 30.0 - index_min, index_max = _find_indexes(self._ds, glb_diffuse_SW_dn_name, sw_min_limit, - sw_max_limit, use_dask) + index_min, index_max = _find_indexes( + self._ds, glb_diffuse_SW_dn_name, sw_min_limit, sw_max_limit, use_dask + ) self._ds.qcfilter.add_test( - glb_diffuse_SW_dn_name, index=index_min, test_assessment='Bad', - test_meaning=f"Value less than BSRN {test.lower()} limit of {sw_min_limit} W/m^2") + glb_diffuse_SW_dn_name, + index=index_min, + test_assessment='Bad', + test_meaning=f"Value less than BSRN {test.lower()} limit of {sw_min_limit} W/m^2", + ) self._ds.qcfilter.add_test( - glb_diffuse_SW_dn_name, index=index_max, test_assessment='Bad', - test_meaning=f"Value greater than BSRN {test.lower()} limit") + glb_diffuse_SW_dn_name, + index=index_max, + test_assessment='Bad', + test_meaning=f"Value greater than BSRN {test.lower()} limit", + ) # Direct Normal Shortwave downwelling min and max tests if direct_normal_SW_dn_name is not None: @@ -261,17 +278,24 @@ def bsrn_limits_test( if test == test_names[0]: sw_max_limit = Sa elif test == test_names[1]: - sw_max_limit = Sa * 0.95 * np.cos(np.radians(sza))**0.2 + 10. + sw_max_limit = Sa * 0.95 * np.cos(np.radians(sza)) ** 0.2 + 10.0 - index_min, index_max = _find_indexes(self._ds, direct_normal_SW_dn_name, - sw_min_limit, sw_max_limit, use_dask) + index_min, index_max = _find_indexes( + self._ds, direct_normal_SW_dn_name, sw_min_limit, sw_max_limit, use_dask + ) self._ds.qcfilter.add_test( - direct_normal_SW_dn_name, index=index_min, test_assessment='Bad', - test_meaning=f"Value less than BSRN {test.lower()} limit of {sw_min_limit} W/m^2") + direct_normal_SW_dn_name, + index=index_min, + test_assessment='Bad', + test_meaning=f"Value less than BSRN {test.lower()} limit of {sw_min_limit} W/m^2", + ) self._ds.qcfilter.add_test( - direct_normal_SW_dn_name, index=index_max, test_assessment='Bad', - test_meaning=f"Value greater than BSRN {test.lower()} limit") + direct_normal_SW_dn_name, + index=index_max, + test_assessment='Bad', + test_meaning=f"Value greater than BSRN {test.lower()} limit", + ) # Direct Shortwave downwelling min and max tests if direct_SW_dn_name is not None: @@ -280,64 +304,92 @@ def bsrn_limits_test( if test == test_names[0]: sw_max_limit = Sa * np.cos(np.radians(sza)) elif test == test_names[1]: - sw_max_limit = Sa * 0.95 * np.cos(np.radians(sza))**1.2 + 10 + sw_max_limit = Sa * 0.95 * np.cos(np.radians(sza)) ** 1.2 + 10 - index_min, index_max = _find_indexes(self._ds, direct_SW_dn_name, - sw_min_limit, sw_max_limit, use_dask) + index_min, index_max = _find_indexes( + self._ds, direct_SW_dn_name, sw_min_limit, sw_max_limit, use_dask + ) self._ds.qcfilter.add_test( - direct_SW_dn_name, index=index_min, test_assessment='Bad', - test_meaning=f"Value less than BSRN {test.lower()} limit of {sw_min_limit} W/m^2") + direct_SW_dn_name, + index=index_min, + test_assessment='Bad', + test_meaning=f"Value less than BSRN {test.lower()} limit of {sw_min_limit} W/m^2", + ) self._ds.qcfilter.add_test( - direct_SW_dn_name, index=index_max, test_assessment='Bad', - test_meaning=f"Value greater than BSRN {test.lower()} limit") + direct_SW_dn_name, + index=index_max, + test_assessment='Bad', + test_meaning=f"Value greater than BSRN {test.lower()} limit", + ) # Shortwave up welling min and max tests if glb_SW_up_name is not None: with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=RuntimeWarning) if test == test_names[0]: - sw_max_limit = Sa * 1.2 * 
np.cos(np.radians(sza))**1.2 + 50 + sw_max_limit = Sa * 1.2 * np.cos(np.radians(sza)) ** 1.2 + 50 elif test == test_names[1]: - sw_max_limit = Sa * np.cos(np.radians(sza))**1.2 + 50 + sw_max_limit = Sa * np.cos(np.radians(sza)) ** 1.2 + 50 - index_min, index_max = _find_indexes(self._ds, glb_SW_up_name, - sw_min_limit, sw_max_limit, use_dask) + index_min, index_max = _find_indexes( + self._ds, glb_SW_up_name, sw_min_limit, sw_max_limit, use_dask + ) self._ds.qcfilter.add_test( - glb_SW_up_name, index=index_min, test_assessment='Bad', - test_meaning=f"Value less than BSRN {test.lower()} limit of {sw_min_limit} W/m^2") + glb_SW_up_name, + index=index_min, + test_assessment='Bad', + test_meaning=f"Value less than BSRN {test.lower()} limit of {sw_min_limit} W/m^2", + ) self._ds.qcfilter.add_test( - glb_SW_up_name, index=index_max, test_assessment='Bad', - test_meaning=f"Value greater than BSRN {test.lower()} limit") + glb_SW_up_name, + index=index_max, + test_assessment='Bad', + test_meaning=f"Value greater than BSRN {test.lower()} limit", + ) # Longwave downwelling min and max tests if glb_LW_dn_name is not None: - index_min, index_max = _find_indexes(self._ds, glb_LW_dn_name, - lw_min_dn_limit, lw_max_dn_limit, use_dask) + index_min, index_max = _find_indexes( + self._ds, glb_LW_dn_name, lw_min_dn_limit, lw_max_dn_limit, use_dask + ) self._ds.qcfilter.add_test( - glb_LW_dn_name, index=index_min, test_assessment='Bad', - test_meaning=f"Value less than BSRN {test.lower()} limit of {lw_min_dn_limit} W/m^2") + glb_LW_dn_name, + index=index_min, + test_assessment='Bad', + test_meaning=f"Value less than BSRN {test.lower()} limit of {lw_min_dn_limit} W/m^2", + ) self._ds.qcfilter.add_test( - glb_LW_dn_name, index=index_max, test_assessment='Bad', - test_meaning=f"Value greater than BSRN {test.lower()} limit of {lw_max_dn_limit} W/m^2") + glb_LW_dn_name, + index=index_max, + test_assessment='Bad', + test_meaning=f"Value greater than BSRN {test.lower()} limit of {lw_max_dn_limit} W/m^2", + ) # Longwave upwelling min and max tests if glb_LW_up_name is not None: - index_min, index_max = _find_indexes(self._ds, glb_LW_up_name, - lw_min_up_limit, lw_max_up_limit, use_dask) + index_min, index_max = _find_indexes( + self._ds, glb_LW_up_name, lw_min_up_limit, lw_max_up_limit, use_dask + ) self._ds.qcfilter.add_test( - glb_LW_up_name, index=index_min, test_assessment='Bad', - test_meaning=f"Value less than BSRN {test.lower()} limit of {lw_min_up_limit} W/m^2") + glb_LW_up_name, + index=index_min, + test_assessment='Bad', + test_meaning=f"Value less than BSRN {test.lower()} limit of {lw_min_up_limit} W/m^2", + ) self._ds.qcfilter.add_test( - glb_LW_up_name, index=index_max, test_assessment='Bad', - test_meaning=f"Value greater than BSRN {test.lower()} limit of {lw_max_up_limit} W/m^2") + glb_LW_up_name, + index=index_max, + test_assessment='Bad', + test_meaning=f"Value greater than BSRN {test.lower()} limit of {lw_max_up_limit} W/m^2", + ) def bsrn_comparison_tests( self, @@ -352,9 +404,9 @@ def bsrn_comparison_tests( test_assessment='Indeterminate', lat_name='lat', lon_name='lon', - LWdn_lt_LWup_component=25., - LWdn_gt_LWup_component=300., - use_dask=False + LWdn_lt_LWup_component=25.0, + LWdn_gt_LWup_component=300.0, + use_dask=False, ): """ Method to apply BSRN comparison tests and add results to ancillary quality control variable. 
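A call sketch for the method whose docstring begins above, assuming it is reached through the dataset's qcfilter accessor in the same way the add_test calls in these hunks are. The test names come from the test_options list below; the variable names and data are synthetic stand-ins for ARM-style radiation fields:

import numpy as np
import xarray as xr
import act  # noqa: F401  # assumed to register the qcfilter accessor

time = np.datetime64('2023-06-01') + np.arange(1440) * np.timedelta64(1, 'm')
rng = np.random.default_rng(0)
ds = xr.Dataset(
    {
        'down_short_hemisp': ('time', rng.uniform(0, 800, time.size)),
        'down_short_diffuse_hemisp': ('time', rng.uniform(0, 300, time.size)),
        'short_direct_normal': ('time', rng.uniform(0, 900, time.size)),
    },
    coords={'time': time},
)
# Scalar location variables for the solar-position calculation.
ds['lat'] = 36.6
ds['lon'] = -97.5

ds.qcfilter.bsrn_comparison_tests(
    test=['Global over Sum SW Ratio', 'Diffuse Ratio'],
    gbl_SW_dn_name='down_short_hemisp',
    glb_diffuse_SW_dn_name='down_short_diffuse_hemisp',
    direct_normal_SW_dn_name='short_direct_normal',
    test_assessment='Indeterminate',
)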
@@ -418,23 +470,36 @@ def bsrn_comparison_tests( if isinstance(test, str): test = [test] - test_options = ['Global over Sum SW Ratio', 'Diffuse Ratio', 'SW up', 'LW down to air temp', - 'LW up to air temp', 'LW down to LW up'] + test_options = [ + 'Global over Sum SW Ratio', + 'Diffuse Ratio', + 'SW up', + 'LW down to air temp', + 'LW up to air temp', + 'LW down to LW up', + ] solar_constant = 1360.8 sza, Sa = _calculate_solar_parameters(self._ds, lat_name, lon_name, solar_constant) # Ratio of Global over Sum SW if test_options[0] in test: - if gbl_SW_dn_name is None or glb_diffuse_SW_dn_name is None or direct_normal_SW_dn_name is None: - raise ValueError('Must set keywords gbl_SW_dn_name, glb_diffuse_SW_dn_name, ' - f'direct_normal_SW_dn_name for {test_options[0]} test.') + if ( + gbl_SW_dn_name is None + or glb_diffuse_SW_dn_name is None + or direct_normal_SW_dn_name is None + ): + raise ValueError( + 'Must set keywords gbl_SW_dn_name, glb_diffuse_SW_dn_name, ' + f'direct_normal_SW_dn_name for {test_options[0]} test.' + ) with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=RuntimeWarning) if use_dask and isinstance(self._ds[glb_diffuse_SW_dn_name].data, da.Array): - sum_sw_down = (self._ds[glb_diffuse_SW_dn_name].data - + self._ds[direct_normal_SW_dn_name].data * np.cos(np.radians(sza))) + sum_sw_down = self._ds[glb_diffuse_SW_dn_name].data + self._ds[ + direct_normal_SW_dn_name + ].data * np.cos(np.radians(sza)) sum_sw_down[sum_sw_down < 50] = np.nan ratio = self._ds[gbl_SW_dn_name].data / sum_sw_down index_a = sza < 75 @@ -445,8 +510,9 @@ def bsrn_comparison_tests( index_4 = da.where((ratio < 0.85) & index_b, True, False) index = (index_1 | index_2 | index_3 | index_4).compute() else: - sum_sw_down = (self._ds[glb_diffuse_SW_dn_name].values - + self._ds[direct_normal_SW_dn_name].values * np.cos(np.radians(sza))) + sum_sw_down = self._ds[glb_diffuse_SW_dn_name].values + self._ds[ + direct_normal_SW_dn_name + ].values * np.cos(np.radians(sza)) sum_sw_down[sum_sw_down < 50] = np.nan ratio = self._ds[gbl_SW_dn_name].values / sum_sw_down index_a = sza < 75 @@ -458,18 +524,32 @@ def bsrn_comparison_tests( index = index_1 | index_2 | index_3 | index_4 test_meaning = "Ratio of Global over Sum shortwave larger than expected" - self._ds.qcfilter.add_test(gbl_SW_dn_name, index=index, test_assessment=test_assessment, - test_meaning=test_meaning) - self._ds.qcfilter.add_test(glb_diffuse_SW_dn_name, index=index, test_assessment=test_assessment, - test_meaning=test_meaning) - self._ds.qcfilter.add_test(direct_normal_SW_dn_name, index=index, test_assessment=test_assessment, - test_meaning=test_meaning) + self._ds.qcfilter.add_test( + gbl_SW_dn_name, + index=index, + test_assessment=test_assessment, + test_meaning=test_meaning, + ) + self._ds.qcfilter.add_test( + glb_diffuse_SW_dn_name, + index=index, + test_assessment=test_assessment, + test_meaning=test_meaning, + ) + self._ds.qcfilter.add_test( + direct_normal_SW_dn_name, + index=index, + test_assessment=test_assessment, + test_meaning=test_meaning, + ) # Diffuse Ratio if test_options[1] in test: if gbl_SW_dn_name is None or glb_diffuse_SW_dn_name is None: - raise ValueError('Must set keywords gbl_SW_dn_name, glb_diffuse_SW_dn_name ' - f'for {test_options[1]} test.') + raise ValueError( + 'Must set keywords gbl_SW_dn_name, glb_diffuse_SW_dn_name ' + f'for {test_options[1]} test.' 
+ ) with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=RuntimeWarning) @@ -482,7 +562,9 @@ def bsrn_comparison_tests( index_2 = da.where((ratio >= 1.10) & index_b, True, False) index = (index_1 | index_2).compute() else: - ratio = self._ds[glb_diffuse_SW_dn_name].values / self._ds[gbl_SW_dn_name].values + ratio = ( + self._ds[glb_diffuse_SW_dn_name].values / self._ds[gbl_SW_dn_name].values + ) ratio[self._ds[gbl_SW_dn_name].values < 50] = np.nan index_a = sza < 75 index_1 = (ratio >= 1.05) & index_a @@ -491,104 +573,180 @@ def bsrn_comparison_tests( index = index_1 | index_2 test_meaning = "Ratio of Diffuse Shortwave over Global Shortwave larger than expected" - self._ds.qcfilter.add_test(gbl_SW_dn_name, index=index, test_assessment=test_assessment, - test_meaning=test_meaning) - self._ds.qcfilter.add_test(glb_diffuse_SW_dn_name, index=index, test_assessment=test_assessment, - test_meaning=test_meaning) + self._ds.qcfilter.add_test( + gbl_SW_dn_name, + index=index, + test_assessment=test_assessment, + test_meaning=test_meaning, + ) + self._ds.qcfilter.add_test( + glb_diffuse_SW_dn_name, + index=index, + test_assessment=test_assessment, + test_meaning=test_meaning, + ) # Shortwave up comparison if test_options[2] in test: - if glb_SW_up_name is None or glb_diffuse_SW_dn_name is None or direct_normal_SW_dn_name is None: - raise ValueError('Must set keywords glb_SW_up_name, glb_diffuse_SW_dn_name, ' - f'direct_normal_SW_dn_name for {test_options[2]} test.') + if ( + glb_SW_up_name is None + or glb_diffuse_SW_dn_name is None + or direct_normal_SW_dn_name is None + ): + raise ValueError( + 'Must set keywords glb_SW_up_name, glb_diffuse_SW_dn_name, ' + f'direct_normal_SW_dn_name for {test_options[2]} test.' + ) with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=RuntimeWarning) if use_dask and isinstance(self._ds[glb_diffuse_SW_dn_name].data, da.Array): - sum_sw_down = (self._ds[glb_diffuse_SW_dn_name].data - + self._ds[direct_normal_SW_dn_name].data * np.cos(np.radians(sza))) + sum_sw_down = self._ds[glb_diffuse_SW_dn_name].data + self._ds[ + direct_normal_SW_dn_name + ].data * np.cos(np.radians(sza)) sum_sw_down[sum_sw_down < 50] = np.nan - index = da.where(self._ds[glb_SW_up_name].data > sum_sw_down, True, False).compute() + index = da.where( + self._ds[glb_SW_up_name].data > sum_sw_down, True, False + ).compute() else: - sum_sw_down = (self._ds[glb_diffuse_SW_dn_name].values - + self._ds[direct_normal_SW_dn_name].values * np.cos(np.radians(sza))) + sum_sw_down = self._ds[glb_diffuse_SW_dn_name].values + self._ds[ + direct_normal_SW_dn_name + ].values * np.cos(np.radians(sza)) sum_sw_down[sum_sw_down < 50] = np.nan index = self._ds[glb_SW_up_name].values > sum_sw_down test_meaning = "Ratio of Shortwave Upwelling greater than Shortwave Sum" - self._ds.qcfilter.add_test(glb_SW_up_name, index=index, test_assessment=test_assessment, - test_meaning=test_meaning) - self._ds.qcfilter.add_test(glb_diffuse_SW_dn_name, index=index, test_assessment=test_assessment, - test_meaning=test_meaning) - self._ds.qcfilter.add_test(direct_normal_SW_dn_name, index=index, test_assessment=test_assessment, - test_meaning=test_meaning) + self._ds.qcfilter.add_test( + glb_SW_up_name, + index=index, + test_assessment=test_assessment, + test_meaning=test_meaning, + ) + self._ds.qcfilter.add_test( + glb_diffuse_SW_dn_name, + index=index, + test_assessment=test_assessment, + test_meaning=test_meaning, + ) + self._ds.qcfilter.add_test( + direct_normal_SW_dn_name, + index=index, 
+ test_assessment=test_assessment, + test_meaning=test_meaning, + ) # Longwave down to air temperature comparison if test_options[3] in test: if glb_LW_dn_name is None or air_temp_name is None: - raise ValueError('Must set keywords glb_LW_dn_name, air_temp_name ' - f' for {test_options[3]} test.') - - air_temp = convert_units(self._ds[air_temp_name].values, - self._ds[air_temp_name].attrs['units'], 'degK') + raise ValueError( + 'Must set keywords glb_LW_dn_name, air_temp_name ' + f'for {test_options[3]} test.' + ) + + air_temp = convert_units( + self._ds[air_temp_name].values, self._ds[air_temp_name].attrs['units'], 'degK' + ) if use_dask and isinstance(self._ds[glb_LW_dn_name].data, da.Array): air_temp = da.array(air_temp) conversion = da.array(Stefan_Boltzmann * air_temp**4) index_1 = (0.4 * conversion) > self._ds[glb_LW_dn_name].data - index_2 = (conversion + 25.) < self._ds[glb_LW_dn_name].data + index_2 = (conversion + 25.0) < self._ds[glb_LW_dn_name].data index = (index_1 | index_2).compute() else: conversion = Stefan_Boltzmann * air_temp**4 index_1 = (0.4 * conversion) > self._ds[glb_LW_dn_name].values - index_2 = (conversion + 25.) < self._ds[glb_LW_dn_name].values + index_2 = (conversion + 25.0) < self._ds[glb_LW_dn_name].values index = index_1 | index_2 - test_meaning = "Longwave downwelling comparison to air temperature out side of expected range" - self._ds.qcfilter.add_test(glb_LW_dn_name, index=index, test_assessment=test_assessment, - test_meaning=test_meaning) + test_meaning = ( + "Longwave downwelling comparison to air temperature outside of expected range" + ) + self._ds.qcfilter.add_test( + glb_LW_dn_name, + index=index, + test_assessment=test_assessment, + test_meaning=test_meaning, + ) # Longwave up to air temperature comparison if test_options[4] in test: if glb_LW_up_name is None or air_temp_name is None: - raise ValueError('Must set keywords glb_LW_up_name, air_temp_name ' - f'for {test_options[3]} test.') - - air_temp = convert_units(self._ds[air_temp_name].values, - self._ds[air_temp_name].attrs['units'], 'degK') + raise ValueError( + 'Must set keywords glb_LW_up_name, air_temp_name ' + f'for {test_options[4]} test.'
+ ) + + air_temp = convert_units( + self._ds[air_temp_name].values, self._ds[air_temp_name].attrs['units'], 'degK' + ) if use_dask and isinstance(self._ds[glb_LW_up_name].data, da.Array): air_temp = da.array(air_temp) - index_1 = (Stefan_Boltzmann * (air_temp - 15)**4) > self._ds[glb_LW_up_name].data - index_2 = (Stefan_Boltzmann * (air_temp + 25)**4) < self._ds[glb_LW_up_name].data + index_1 = (Stefan_Boltzmann * (air_temp - 15) ** 4) > self._ds[glb_LW_up_name].data + index_2 = (Stefan_Boltzmann * (air_temp + 25) ** 4) < self._ds[glb_LW_up_name].data index = (index_1 | index_2).compute() else: - index_1 = (Stefan_Boltzmann * (air_temp - 15)**4) > self._ds[glb_LW_up_name].values - index_2 = (Stefan_Boltzmann * (air_temp + 25)**4) < self._ds[glb_LW_up_name].values + index_1 = (Stefan_Boltzmann * (air_temp - 15) ** 4) > self._ds[ + glb_LW_up_name + ].values + index_2 = (Stefan_Boltzmann * (air_temp + 25) ** 4) < self._ds[ + glb_LW_up_name + ].values index = index_1 | index_2 - test_meaning = "Longwave upwelling comparison to air temperature out side of expected range" - self._ds.qcfilter.add_test(glb_LW_up_name, index=index, test_assessment=test_assessment, - test_meaning=test_meaning) + test_meaning = ( + "Longwave upwelling comparison to air temperature outside of expected range" + ) + self._ds.qcfilter.add_test( + glb_LW_up_name, + index=index, + test_assessment=test_assessment, + test_meaning=test_meaning, + ) # Longwave down to longwave up comparison if test_options[5] in test: if glb_LW_dn_name is None or glb_LW_up_name is None: - raise ValueError('Must set keywords glb_LW_dn_name, glb_LW_up_name ' - f'for {test_options[3]} test.') + raise ValueError( + 'Must set keywords glb_LW_dn_name, glb_LW_up_name ' + f'for {test_options[5]} test.' + ) if use_dask and isinstance(self._ds[glb_LW_dn_name].data, da.Array): - index_1 = da.where(self._ds[glb_LW_dn_name].data - > (self._ds[glb_LW_up_name].data + LWdn_lt_LWup_component), True, False) - index_2 = da.where(self._ds[glb_LW_dn_name].data - < (self._ds[glb_LW_up_name].data - LWdn_gt_LWup_component), True, False) + index_1 = da.where( + self._ds[glb_LW_dn_name].data + > (self._ds[glb_LW_up_name].data + LWdn_lt_LWup_component), + True, + False, + ) + index_2 = da.where( + self._ds[glb_LW_dn_name].data + < (self._ds[glb_LW_up_name].data - LWdn_gt_LWup_component), + True, + False, + ) index = (index_1 | index_2).compute() else: - index_1 = self._ds[glb_LW_dn_name].values > (self._ds[glb_LW_up_name].values + LWdn_lt_LWup_component) - index_2 = self._ds[glb_LW_dn_name].values < (self._ds[glb_LW_up_name].values - LWdn_gt_LWup_component) + index_1 = self._ds[glb_LW_dn_name].values > ( + self._ds[glb_LW_up_name].values + LWdn_lt_LWup_component + ) + index_2 = self._ds[glb_LW_dn_name].values < ( + self._ds[glb_LW_up_name].values - LWdn_gt_LWup_component + ) index = index_1 | index_2 - test_meaning = "Lonwave downwelling compared to longwave upwelling outside of expected range" - self._ds.qcfilter.add_test(glb_LW_dn_name, index=index, test_assessment=test_assessment, - test_meaning=test_meaning) - self._ds.qcfilter.add_test(glb_LW_up_name, index=index, test_assessment=test_assessment, - test_meaning=test_meaning) + test_meaning = ( + "Longwave downwelling compared to longwave upwelling outside of expected range" + ) + self._ds.qcfilter.add_test( + glb_LW_dn_name, + index=index, + test_assessment=test_assessment, + test_meaning=test_meaning, + ) + self._ds.qcfilter.add_test( + glb_LW_up_name, + index=index, + test_assessment=test_assessment, +
test_meaning=test_meaning, + ) diff --git a/act/qc/clean.py b/act/qc/clean.py index c84a30efef..00ab5f8cc5 100644 --- a/act/qc/clean.py +++ b/act/qc/clean.py @@ -198,7 +198,6 @@ def handle_missing_values(self, default_missing_value=np.int32(-9999)): np.dtype('float32'), np.dtype('float64'), ]: - # Look at units variable to see if this is the stupid way some # ARM products mix data and state variables. If the units are not # in the normal list of unitless type assume this is a data variable @@ -437,7 +436,7 @@ def get_attr_info(self, variable=None, flag=False): 'Value is equal to missing_value.', 'Value is less than the valid_min.', 'Value is greater than the valid_max.', - 'Difference between current and previous values exceeds valid_delta.' + 'Difference between current and previous values exceeds valid_delta.', ] return_dict['flag_tests'] = [1, 2, 3, 4] return_dict['flag_masks'] = [1, 2, 4, 8] @@ -452,7 +451,7 @@ def get_attr_info(self, variable=None, flag=False): 'bit_3_description', 'bit_3_assessment', 'bit_4_description', - 'bit_4_assessment' + 'bit_4_assessment', ] return return_dict @@ -493,10 +492,8 @@ def clean_arm_state_variables( for var in variables: flag_info = self.get_attr_info(variable=var, flag=integer_flag) if flag_info is not None: - # Add new attributes to variable for attr in ['flag_values', 'flag_meanings', 'flag_masks']: - if len(flag_info[attr]) > 0: # Only add if attribute does not exist. if attr in self._ds[var].attrs.keys() is False: @@ -596,7 +593,7 @@ def link_variables(self): continue # Skip data quality fields. try: - if not ('Quality check results on field:' in self._ds[var].attrs['long_name']): + if "Quality check results on field:" not in self._ds[var].attrs["long_name"]: continue except KeyError: pass @@ -626,7 +623,7 @@ def clean_arm_qc( clean_units_string=True, correct_valid_min_max=True, remove_unset_global_tests=True, - **kwargs + **kwargs, ): """ Method to clean up Xarray dataset QC variables. @@ -652,7 +649,6 @@ def clean_arm_qc( """ global_qc = self.get_attr_info() for qc_var in self.matched_qc_variables: - # Clean up units attribute from unitless to udunits '1' try: if clean_units_string and self._ds[qc_var].attrs['units'] == 'unitless': @@ -719,7 +715,6 @@ def clean_arm_qc( flag_masks = self._ds[qc_var_name].attrs['flag_masks'] tests_to_remove = [] for ii, flag_meaning in enumerate(flag_meanings): - # Loop over usual test attribute names looking to see if they # are listed in test description. If so use that name for look up. test_attribute_limit_name = None @@ -760,7 +755,6 @@ def normalize_assessment( exclude_variables=None, qc_lookup={'Incorrect': 'Bad', 'Suspect': 'Indeterminate'}, ): - """ Method to clean up assessment terms used to be consistent between embedded QC and DQRs. 
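
Note on the add_test calling convention used throughout the radiometer hunks above: a minimal sketch with a hypothetical dataset and variable name (the 1500 W/m^2 limit is illustrative only); it assumes only that importing act.qc.qcfilter registers the qcfilter accessor, which is the module reformatted just below.

    import numpy as np
    import xarray as xr

    import act.qc.qcfilter  # noqa: F401 -- importing registers the .qcfilter accessor

    # Hypothetical one-variable dataset.
    ds = xr.Dataset({'down_short_hemisp': ('time', np.array([100.0, 2000.0, 300.0]))})

    # A boolean index marks the failing samples; the assessment and meaning are
    # recorded on the companion qc_ variable.
    index = ds['down_short_hemisp'].values > 1500.0
    ds.qcfilter.add_test(
        'down_short_hemisp',
        index=index,
        test_assessment='Indeterminate',
        test_meaning='Value above plausible shortwave limit',
    )
    print(ds['qc_down_short_hemisp'].values)  # expected: [0 1 0] for test number 1
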
diff --git a/act/qc/comparison_tests.py b/act/qc/comparison_tests.py index 731eae0230..3b13659ac1 100644 --- a/act/qc/comparison_tests.py +++ b/act/qc/comparison_tests.py @@ -3,7 +3,6 @@ """ import copy -import warnings import numpy as np import xarray as xr diff --git a/act/qc/qcfilter.py b/act/qc/qcfilter.py index 6137db9f92..aeecbe2a85 100644 --- a/act/qc/qcfilter.py +++ b/act/qc/qcfilter.py @@ -26,13 +26,7 @@ def __init__(self, ds): """initialize""" self._ds = ds - def check_for_ancillary_qc( - self, - var_name, - add_if_missing=True, - cleanup=False, - flag_type=False - ): + def check_for_ancillary_qc(self, var_name, add_if_missing=True, cleanup=False, flag_type=False): """ Method to check if a quality control variable exist in the dataset and return the quality control variable name. @@ -119,10 +113,7 @@ def check_for_ancillary_qc( return qc_var_name def create_qc_variable( - self, var_name, - flag_type=False, - flag_values_set_value=0, - qc_var_name=None + self, var_name, flag_type=False, flag_values_set_value=0, qc_var_name=None ): """ Method to create a quality control variable in the dataset. @@ -206,9 +197,7 @@ # Update if using flag_values and don't want 0 to be default value. if flag_type and flag_values_set_value != 0: - self._ds[qc_var_name].values = self._ds[qc_var_name].values + int( - flag_values_set_value - ) + self._ds[qc_var_name].values = self._ds[qc_var_name].values + int(flag_values_set_value) # Add required variable attributes. if flag_type: @@ -260,7 +249,6 @@ def update_ancillary_variable(self, var_name, qc_var_name=None): try: ancillary_variables = self._ds[var_name].attrs['ancillary_variables'] if qc_var_name not in ancillary_variables: - ancillary_variables = ' '.join([ancillary_variables, qc_var_name]) except KeyError: ancillary_variables = qc_var_name @@ -805,7 +793,6 @@ def get_masked_data( ma_fill_value=None, return_inverse=False, ): - """ Returns a numpy masked array containing data and mask or a numpy float array with masked values set to NaN. @@ -1030,25 +1017,25 @@ def datafilter( except KeyError: pass - print(f'No quality control variable for {var_name} found ' - f'in call to .qcfilter.datafilter()') + print( + f'No quality control variable for {var_name} found ' + f'in call to .qcfilter.datafilter()' + ) continue # Need to return data as Numpy array with NaN values. Setting the Dask array # to Numpy masked array does not work with other tools. data = self.get_masked_data( - var_name, - rm_assessments=rm_assessments, - rm_tests=rm_tests, - return_nan_array=True + var_name, rm_assessments=rm_assessments, rm_tests=rm_tests, return_nan_array=True ) # If data was originally stored as Dask array return values to Dataset as Dask array # else set as Numpy array. try: self._ds[var_name].data = dask.array.from_array( - data, chunks=self._ds[var_name].data.chunksize) + data, chunks=self._ds[var_name].data.chunksize + ) except AttributeError: self._ds[var_name].values = data diff --git a/act/qc/qctests.py b/act/qc/qctests.py index 45bfb7d179..3b9916d1f8 100644 --- a/act/qc/qctests.py +++ b/act/qc/qctests.py @@ -1353,8 +1353,7 @@ def add_iqr_test( from scikit_posthocs import outliers_iqr except ImportError: raise ImportError( - 'scikit_posthocs needs to be installed on your system to ' - 'run add_iqr_test.' + 'scikit_posthocs needs to be installed on your system to ' 'run add_iqr_test.'
) if test_meaning is None: @@ -1452,8 +1451,7 @@ def add_gesd_test( from scikit_posthocs import outliers_gesd except ImportError: raise ImportError( - 'scikit_posthocs needs to be installed on your system to ' - 'run add_gesd_test.' + 'scikit_posthocs needs to be installed on your system to ' 'run add_gesd_test.' ) if test_meaning is None: @@ -1510,7 +1508,7 @@ def add_atmospheric_pressure_test( test_number=None, flag_value=False, prepend_text=None, - use_dask=False + use_dask=False, ): """ Method to perform a limit test on atmospheric pressure data using @@ -1592,8 +1590,10 @@ def add_atmospheric_pressure_test( upper_limit = upper_limit.magnitude if test_meaning is None: - test_meaning = ('Value outside of atmospheric pressure range test range: ' - f'{round(lower_limit, 2)} to {round(upper_limit, 2)} {data_units}') + test_meaning = ( + 'Value outside of atmospheric pressure range test range: ' + f'{round(lower_limit, 2)} to {round(upper_limit, 2)} {data_units}' + ) if prepend_text is not None: test_meaning = ': '.join((prepend_text, test_meaning)) @@ -1605,7 +1605,9 @@ def add_atmospheric_pressure_test( index2 = da.where(self._ds[var_name].data > upper_limit, True, False) index = (index1 | index2).compute() else: - index = (self._ds[var_name].values > upper_limit) | (self._ds[var_name].values < lower_limit) + index = (self._ds[var_name].values > upper_limit) | ( + self._ds[var_name].values < lower_limit + ) result = self._ds.qcfilter.add_test( var_name, diff --git a/act/qc/radiometer_tests.py b/act/qc/radiometer_tests.py index 34ba7687c2..6429aef15b 100644 --- a/act/qc/radiometer_tests.py +++ b/act/qc/radiometer_tests.py @@ -3,7 +3,6 @@ """ -import datetime import warnings import dask @@ -13,7 +12,7 @@ from scipy.fftpack import rfft, rfftfreq from act.utils.datetime_utils import determine_time_delta -from act.utils.geo_utils import get_sunrise_sunset_noon, is_sun_visible +from act.utils.geo_utils import is_sun_visible def fft_shading_test( diff --git a/act/qc/sp2.py b/act/qc/sp2.py index 9a67123e32..526405408e 100644 --- a/act/qc/sp2.py +++ b/act/qc/sp2.py @@ -119,7 +119,8 @@ def __init__(self): 'Attempting to use SP2ParticleCriteria without' 'PySP2 installed. 
SP2ParticleCriteria will' 'not have any functionality besides this' - 'warning message.', RuntimeWarning + 'warning message.', + RuntimeWarning, ) diff --git a/act/retrievals/cbh.py b/act/retrievals/cbh.py index dd7aa0c04f..f68f47e52c 100644 --- a/act/retrievals/cbh.py +++ b/act/retrievals/cbh.py @@ -16,7 +16,7 @@ def generic_sobel_cbh( fill_na=None, return_thresh=False, filter_type='uniform', - edge_thresh=5., + edge_thresh=5.0, ): """ Function for calculating cloud base height from lidar/radar data diff --git a/act/retrievals/doppler_lidar.py b/act/retrievals/doppler_lidar.py index 0e5dd85154..53a9619681 100644 --- a/act/retrievals/doppler_lidar.py +++ b/act/retrievals/doppler_lidar.py @@ -132,9 +132,16 @@ def compute_winds_from_ppi( task.append( dask.delayed(process_ppi_winds)( - time[scan_index], elevation[scan_index], azimuth[scan_index], snr[scan_index, :], - doppler[scan_index, :], rng, condition_limit, snr_threshold, remove_all_missing, - height_units + time[scan_index], + elevation[scan_index], + azimuth[scan_index], + snr[scan_index, :], + doppler[scan_index, :], + rng, + condition_limit, + snr_threshold, + remove_all_missing, + height_units, ) ) @@ -144,7 +151,9 @@ def compute_winds_from_ppi( results = [results[ii] for ii, value in enumerate(is_Dataset) if value is True] new_ds = xr.concat(results, 'time') - if isinstance(return_ds, xr.core.dataset.Dataset) and isinstance(new_ds, xr.core.dataset.Dataset): + if isinstance(return_ds, xr.core.dataset.Dataset) and isinstance( + new_ds, xr.core.dataset.Dataset + ): return_ds = xr.concat([return_ds, new_ds], dim='time') else: return_ds = new_ds @@ -152,8 +161,18 @@ def compute_winds_from_ppi( return return_ds -def process_ppi_winds(time, elevation, azimuth, snr, doppler, rng, condition_limit, - snr_threshold, remove_all_missing, height_units): +def process_ppi_winds( + time, + elevation, + azimuth, + snr, + doppler, + rng, + condition_limit, + snr_threshold, + remove_all_missing, + height_units, +): """ This function is for processing the winds using dask from the compute_winds_from_ppi function. This should not be used standalone. 
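
The delayed process_ppi_winds calls above are driven by compute_winds_from_ppi; a minimal sketch of that public entry point, with a hypothetical PPI file name (the same keyword values appear in the SAIL notebook further down in this diff):

    import act

    # Hypothetical doppler-lidar PPI file; any gucdlppiM1.b1-style file works here.
    ds = act.io.arm.read_arm_netcdf('gucdlppiM1.b1.20220801.000003.cdf')

    # Wraps the dask.delayed per-scan processing shown above and concatenates
    # the per-scan wind retrievals along time.
    wind_ds = act.retrievals.compute_winds_from_ppi(
        ds, remove_all_missing=True, snr_threshold=0.008
    )
    print(wind_ds['wind_speed'], wind_ds['wind_direction'])
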
@@ -235,9 +254,7 @@ def process_ppi_winds(time, elevation, azimuth, snr, doppler, rng, condition_lim wdir = np.degrees(np.arctan2(u_wind, v_wind) + np.pi) wspd_err = np.sqrt((u_wind * u_err) ** 2 + (v_wind * v_err) ** 2) / wspd - wdir_err = np.degrees( - np.sqrt((u_wind * v_err) ** 2 + (v_wind * u_err) ** 2) / wspd**2 - ) + wdir_err = np.degrees(np.sqrt((u_wind * v_err) ** 2 + (v_wind * u_err) ** 2) / wspd**2) if remove_all_missing and np.isnan(wspd).all(): return np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan diff --git a/act/retrievals/radiation.py b/act/retrievals/radiation.py index 4d1964ebdd..09c5d1ade3 100644 --- a/act/retrievals/radiation.py +++ b/act/retrievals/radiation.py @@ -7,7 +7,6 @@ import xarray as xr from scipy.constants import Stefan_Boltzmann -from act.utils.datetime_utils import datetime64_to_datetime from act.utils.geo_utils import get_solar_azimuth_elevation @@ -59,7 +58,7 @@ def calculate_dsh_from_dsdh_sdn( attrs={ 'long_name': 'Derived Downwelling Shortwave Hemispheric Irradiance', 'units': 'W/m^2', - } + }, ) return ds @@ -144,7 +143,6 @@ def calculate_net_radiation( dlhs='down_long_hemisp_shaded', smooth=None, ): - """ Function to calculate the net radiation from upwelling short and long-wave irradiance and @@ -207,7 +205,6 @@ def calculate_longwave_radiation( emiss_a=0.61, emiss_b=0.06, ): - """ Function to calculate longwave radiation during clear and cloudy sky conditions @@ -249,7 +246,6 @@ def calculate_longwave_radiation( """ if met_ds is not None: - T = met_ds[temperature_var] + 273.15 # C to K e = met_ds[vapor_pressure_var] * 10.0 # kpa to hpa else: diff --git a/act/retrievals/sonde.py b/act/retrievals/sonde.py index 242b7f9abb..01cf3ce045 100644 --- a/act/retrievals/sonde.py +++ b/act/retrievals/sonde.py @@ -3,9 +3,7 @@ """ -import warnings import numpy as np -import pandas as pd import xarray as xr from operator import itemgetter from itertools import groupby @@ -174,9 +172,7 @@ def calculate_stability_indicies( ds['parcel_temperature'].attrs['units'] = t_profile.units # Calculate CAPE, CIN, LCL - sbcape, sbcin = mpcalc.surface_based_cape_cin(p_sorted, - t_sorted, - td_sorted) + sbcape, sbcin = mpcalc.surface_based_cape_cin(p_sorted, t_sorted, td_sorted) lcl = mpcalc.lcl(p_sorted[0], t_sorted[0], td_sorted[0]) try: @@ -277,8 +273,14 @@ def calculate_pbl_liu_liang( """ # Preprocess the sonde data to ensure the same methods across all retrievals - ds2 = preprocess_sonde_data(ds, temperature=temperature, pressure=pressure, - height=height, smooth_height=smooth_height, base=5.) 
+ ds2 = preprocess_sonde_data( + ds, + temperature=temperature, + pressure=pressure, + height=height, + smooth_height=smooth_height, + base=5.0, + ) pres = ds2[pressure].values wspd = ds2[windspeed].values @@ -417,7 +419,7 @@ def calculate_pbl_heffter( pressure='pres', height='alt', smooth_height=3, - base=5., + base=5.0, ): """ Function for calculating the PBL height from a radiosonde profile @@ -460,8 +462,14 @@ def calculate_pbl_heffter( """ # Preprocess the sonde data to ensure the same methods across all retrievals - ds2 = preprocess_sonde_data(ds, temperature=temperature, pressure=pressure, - height=height, smooth_height=smooth_height, base=base) + ds2 = preprocess_sonde_data( + ds, + temperature=temperature, + pressure=pressure, + height=height, + smooth_height=smooth_height, + base=base, + ) # Get data pres = ds2[pressure].values @@ -496,25 +504,25 @@ def calculate_pbl_heffter( # For each layer, calculate the difference in theta from # top and bottom of the layer. The lowest layer where the # difference is > 2 K is set as the PBL. - pbl = 0. + pbl = 0.0 theta_diff_layer = [] bottom_inversion = [] top_inversion = [] for r in ranges: - if agl[r[1]] > 4000.: + if agl[r[1]] > 4000.0: continue theta_diff = theta[r[1]] - theta[r[0]] theta_diff_layer.append(theta_diff) bottom_inversion.append(alt[r[0]]) top_inversion.append(alt[r[1]]) - if pbl == 0. and theta_diff > 2.0: + if pbl == 0.0 and theta_diff > 2.0: pbl = alt[r[0]] if len(theta_diff_layer) == 0: - pbl = -9999. + pbl = -9999.0 # If PBL is not set, set it to the layer with the max theta diff - if pbl == 0.: + if pbl == 0.0: idx = np.argmax(theta_diff_layer) pbl = bottom_inversion[idx] @@ -536,11 +544,21 @@ def calculate_pbl_heffter( ds['alt_ss'] = da atts = {'units': 'm', 'long_name': 'Bottom height of inversion layers'} - da = xr.DataArray(bottom_inversion, coords={'layers': list(range(len(bottom_inversion)))}, dims=['layers'], attrs=atts) + da = xr.DataArray( + bottom_inversion, + coords={'layers': list(range(len(bottom_inversion)))}, + dims=['layers'], + attrs=atts, + ) ds['bottom_inversion'] = da atts = {'units': 'm', 'long_name': 'Top height of inversion layers'} - da = xr.DataArray(top_inversion, coords={'layers': list(range(len(top_inversion)))}, dims=['layers'], attrs=atts) + da = xr.DataArray( + top_inversion, + coords={'layers': list(range(len(top_inversion)))}, + dims=['layers'], + attrs=atts, + ) ds['top_inversion'] = da return ds @@ -552,7 +570,7 @@ def preprocess_sonde_data( pressure='pres', height='alt', smooth_height=3, - base=5., + base=5.0, ): """ Function for processing the SONDE data for the PBL calculations. 
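
A minimal sketch of driving the Heffter retrieval reformatted above; the sample-file constant is assumed to exist in act.tests, and 'tdry'/'pres'/'alt' are the usual ARM sonde variable names passed here explicitly:

    import act
    from act.retrievals.sonde import calculate_pbl_heffter
    from act.tests import EXAMPLE_SONDE1  # assumed sample radiosonde file

    ds = act.io.arm.read_arm_netcdf(EXAMPLE_SONDE1)

    # Keywords mirror the signature above; preprocess_sonde_data is applied
    # internally before the inversion-layer search.
    ds = calculate_pbl_heffter(
        ds, temperature='tdry', pressure='pres', height='alt', smooth_height=3, base=5.0
    )

    # Inversion-layer variables added by the retrieval, per the hunks above.
    print(ds['bottom_inversion'].values, ds['top_inversion'].values)
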
@@ -628,7 +646,7 @@ def preprocess_sonde_data( temp = ds2[temperature].values # Perform Pre-processing checks - if len(temp) == 0.: + if len(temp) == 0.0: raise ValueError('No data in profile') if np.nanmax(alt) < 1000.0: diff --git a/act/tests/__init__.py b/act/tests/__init__.py index 9ae16bd6a3..01d4e96d62 100644 --- a/act/tests/__init__.py +++ b/act/tests/__init__.py @@ -55,8 +55,8 @@ 'EXAMPLE_INI', 'EXAMPLE_SP2B', 'EXAMPLE_MET_YAML', - 'EXAMPLE_CLOUDPHASE' - 'EXAMPLE_ECOR', + 'EXAMPLE_CLOUDPHASE', + 'EXAMPLE_ECOR', 'EXAMPLE_SEBS', 'EXAMPLE_ENA_MET', 'EXAMPLE_CCN', diff --git a/act/tests/sample_files.py b/act/tests/sample_files.py index d1e1194cc5..973b653015 100644 --- a/act/tests/sample_files.py +++ b/act/tests/sample_files.py @@ -4,7 +4,6 @@ """ -import os from arm_test_data import DATASETS @@ -27,9 +26,7 @@ EXAMPLE_AERI = DATASETS.fetch('sgpaerich1C1.b1.20190501.000342.nc') EXAMPLE_IRTSST = DATASETS.fetch('marirtsstM1.b1.20190320.000000.nc') EXAMPLE_MFRSR = DATASETS.fetch('sgpmfrsr7nchE11.b1.20210329.070000.nc') -EXAMPLE_SURFSPECALB1MLAWER = DATASETS.fetch( - 'nsasurfspecalb1mlawerC1.c1.20160609.080000.nc' -) +EXAMPLE_SURFSPECALB1MLAWER = DATASETS.fetch('nsasurfspecalb1mlawerC1.c1.20160609.080000.nc') EXAMPLE_SIGMA_MPLV5 = DATASETS.fetch('201509021500.bi') EXAMPLE_RL1 = DATASETS.fetch('sgprlC1.a0.20160131.000000.nc') EXAMPLE_CO2FLX4M = DATASETS.fetch('sgpco2flx4mC1.b1.20201007.001500.nc') @@ -49,9 +46,13 @@ EXAMPLE_MET_YAML = DATASETS.fetch('sgpmetE13.b1.yaml') EXAMPLE_CLOUDPHASE = DATASETS.fetch('nsacloudphaseC1.c1.20180601.000000.nc') EXAMPLE_AAF_ICARTT = DATASETS.fetch('AAFNAV_COR_20181104_R0.ict') -EXAMPLE_NEON = DATASETS.fetch('NEON.D18.BARR.DP1.00002.001.000.010.001.SAAT_1min.2022-10.expanded.20221107T205629Z.csv') +EXAMPLE_NEON = DATASETS.fetch( + 'NEON.D18.BARR.DP1.00002.001.000.010.001.SAAT_1min.2022-10.expanded.20221107T205629Z.csv' +) EXAMPLE_NEON_VARIABLE = DATASETS.fetch('NEON.D18.BARR.DP1.00002.001.variables.20221201T110553Z.csv') -EXAMPLE_NEON_POSITION = DATASETS.fetch('NEON.D18.BARR.DP1.00002.001.sensor_positions.20221107T205629Z.csv') +EXAMPLE_NEON_POSITION = DATASETS.fetch( + 'NEON.D18.BARR.DP1.00002.001.sensor_positions.20221107T205629Z.csv' +) EXAMPLE_DOD = DATASETS.fetch('vdis.b1') EXAMPLE_EBBR1 = DATASETS.fetch('sgp30ebbrE32.b1.20191125.000000.nc') EXAMPLE_EBBR2 = DATASETS.fetch('sgp30ebbrE32.b1.20191130.000000.nc') @@ -67,71 +68,78 @@ EXAMPLE_HYSPLIT = DATASETS.fetch('houstonaug300.0summer2010080100') # Multiple files in a list -dlppi_multi_list = ['sgpdlppiC1.b1.20191015.120023.cdf', - 'sgpdlppiC1.b1.20191015.121506.cdf'] +dlppi_multi_list = ['sgpdlppiC1.b1.20191015.120023.cdf', 'sgpdlppiC1.b1.20191015.121506.cdf'] EXAMPLE_DLPPI_MULTI = [DATASETS.fetch(file) for file in dlppi_multi_list] -noaa_psl_list = ['ayp22199.21m', - 'ayp22200.00m'] +noaa_psl_list = ['ayp22199.21m', 'ayp22200.00m'] EXAMPLE_NOAA_PSL_SURFACEMET = [DATASETS.fetch(file) for file in noaa_psl_list] -met_wildcard_list = ['sgpmetE13.b1.20190101.000000.cdf', - 'sgpmetE13.b1.20190102.000000.cdf', - 'sgpmetE13.b1.20190103.000000.cdf', - 'sgpmetE13.b1.20190104.000000.cdf', - 'sgpmetE13.b1.20190105.000000.cdf', - 'sgpmetE13.b1.20190106.000000.cdf', - 'sgpmetE13.b1.20190107.000000.cdf'] +met_wildcard_list = [ + 'sgpmetE13.b1.20190101.000000.cdf', + 'sgpmetE13.b1.20190102.000000.cdf', + 'sgpmetE13.b1.20190103.000000.cdf', + 'sgpmetE13.b1.20190104.000000.cdf', + 'sgpmetE13.b1.20190105.000000.cdf', + 'sgpmetE13.b1.20190106.000000.cdf', + 'sgpmetE13.b1.20190107.000000.cdf', +] EXAMPLE_MET_WILDCARD = 
[DATASETS.fetch(file) for file in met_wildcard_list] -met_contour_list = ['sgpmetE15.b1.20190508.000000.cdf', - 'sgpmetE31.b1.20190508.000000.cdf', - 'sgpmetE32.b1.20190508.000000.cdf', - 'sgpmetE33.b1.20190508.000000.cdf', - 'sgpmetE34.b1.20190508.000000.cdf', - 'sgpmetE35.b1.20190508.000000.cdf', - 'sgpmetE36.b1.20190508.000000.cdf', - 'sgpmetE37.b1.20190508.000000.cdf', - 'sgpmetE38.b1.20190508.000000.cdf', - 'sgpmetE39.b1.20190508.000000.cdf', - 'sgpmetE40.b1.20190508.000000.cdf', - 'sgpmetE9.b1.20190508.000000.cdf', - 'sgpmetE13.b1.20190508.000000.cdf'] +met_contour_list = [ + 'sgpmetE15.b1.20190508.000000.cdf', + 'sgpmetE31.b1.20190508.000000.cdf', + 'sgpmetE32.b1.20190508.000000.cdf', + 'sgpmetE33.b1.20190508.000000.cdf', + 'sgpmetE34.b1.20190508.000000.cdf', + 'sgpmetE35.b1.20190508.000000.cdf', + 'sgpmetE36.b1.20190508.000000.cdf', + 'sgpmetE37.b1.20190508.000000.cdf', + 'sgpmetE38.b1.20190508.000000.cdf', + 'sgpmetE39.b1.20190508.000000.cdf', + 'sgpmetE40.b1.20190508.000000.cdf', + 'sgpmetE9.b1.20190508.000000.cdf', + 'sgpmetE13.b1.20190508.000000.cdf', +] EXAMPLE_MET_CONTOUR = [DATASETS.fetch(file) for file in met_contour_list] -twp_sonde_wildcard_list = ['twpsondewnpnC3.b1.20060119.050300.custom.cdf', - 'twpsondewnpnC3.b1.20060119.112000.custom.cdf', - 'twpsondewnpnC3.b1.20060119.163300.custom.cdf', - 'twpsondewnpnC3.b1.20060119.231600.custom.cdf', - 'twpsondewnpnC3.b1.20060120.043800.custom.cdf', - 'twpsondewnpnC3.b1.20060120.111900.custom.cdf', - 'twpsondewnpnC3.b1.20060120.170800.custom.cdf', - 'twpsondewnpnC3.b1.20060120.231500.custom.cdf', - 'twpsondewnpnC3.b1.20060121.051500.custom.cdf', - 'twpsondewnpnC3.b1.20060121.111600.custom.cdf', - 'twpsondewnpnC3.b1.20060121.171600.custom.cdf', - 'twpsondewnpnC3.b1.20060121.231600.custom.cdf', - 'twpsondewnpnC3.b1.20060122.052600.custom.cdf', - 'twpsondewnpnC3.b1.20060122.111500.custom.cdf', - 'twpsondewnpnC3.b1.20060122.171800.custom.cdf', - 'twpsondewnpnC3.b1.20060122.232600.custom.cdf', - 'twpsondewnpnC3.b1.20060123.052500.custom.cdf', - 'twpsondewnpnC3.b1.20060123.111700.custom.cdf', - 'twpsondewnpnC3.b1.20060123.171600.custom.cdf', - 'twpsondewnpnC3.b1.20060123.231500.custom.cdf', - 'twpsondewnpnC3.b1.20060124.051500.custom.cdf', - 'twpsondewnpnC3.b1.20060124.111800.custom.cdf', - 'twpsondewnpnC3.b1.20060124.171700.custom.cdf', - 'twpsondewnpnC3.b1.20060124.231500.custom.cdf'] +twp_sonde_wildcard_list = [ + 'twpsondewnpnC3.b1.20060119.050300.custom.cdf', + 'twpsondewnpnC3.b1.20060119.112000.custom.cdf', + 'twpsondewnpnC3.b1.20060119.163300.custom.cdf', + 'twpsondewnpnC3.b1.20060119.231600.custom.cdf', + 'twpsondewnpnC3.b1.20060120.043800.custom.cdf', + 'twpsondewnpnC3.b1.20060120.111900.custom.cdf', + 'twpsondewnpnC3.b1.20060120.170800.custom.cdf', + 'twpsondewnpnC3.b1.20060120.231500.custom.cdf', + 'twpsondewnpnC3.b1.20060121.051500.custom.cdf', + 'twpsondewnpnC3.b1.20060121.111600.custom.cdf', + 'twpsondewnpnC3.b1.20060121.171600.custom.cdf', + 'twpsondewnpnC3.b1.20060121.231600.custom.cdf', + 'twpsondewnpnC3.b1.20060122.052600.custom.cdf', + 'twpsondewnpnC3.b1.20060122.111500.custom.cdf', + 'twpsondewnpnC3.b1.20060122.171800.custom.cdf', + 'twpsondewnpnC3.b1.20060122.232600.custom.cdf', + 'twpsondewnpnC3.b1.20060123.052500.custom.cdf', + 'twpsondewnpnC3.b1.20060123.111700.custom.cdf', + 'twpsondewnpnC3.b1.20060123.171600.custom.cdf', + 'twpsondewnpnC3.b1.20060123.231500.custom.cdf', + 'twpsondewnpnC3.b1.20060124.051500.custom.cdf', + 'twpsondewnpnC3.b1.20060124.111800.custom.cdf', + 
'twpsondewnpnC3.b1.20060124.171700.custom.cdf', + 'twpsondewnpnC3.b1.20060124.231500.custom.cdf', +] EXAMPLE_TWP_SONDE_WILDCARD = [DATASETS.fetch(file) for file in twp_sonde_wildcard_list] -twp_sonde_20060121_list = ['twpsondewnpnC3.b1.20060121.051500.custom.cdf', - 'twpsondewnpnC3.b1.20060121.111600.custom.cdf', - 'twpsondewnpnC3.b1.20060121.171600.custom.cdf', - 'twpsondewnpnC3.b1.20060121.231600.custom.cdf'] +twp_sonde_20060121_list = [ + 'twpsondewnpnC3.b1.20060121.051500.custom.cdf', + 'twpsondewnpnC3.b1.20060121.111600.custom.cdf', + 'twpsondewnpnC3.b1.20060121.171600.custom.cdf', + 'twpsondewnpnC3.b1.20060121.231600.custom.cdf', +] EXAMPLE_TWP_SONDE_20060121 = [DATASETS.fetch(file) for file in twp_sonde_20060121_list] -stamp_wildcard_list = ['sgpstampE13.b1.20200101.000000.nc', - 'sgpstampE31.b1.20200101.000000.nc', - 'sgpstampE32.b1.20200101.000000.nc', - 'sgpstampE33.b1.20200101.000000.nc', - 'sgpstampE34.b1.20200101.000000.nc', - 'sgpstampE9.b1.20200101.000000.nc'] +stamp_wildcard_list = [ + 'sgpstampE13.b1.20200101.000000.nc', + 'sgpstampE31.b1.20200101.000000.nc', + 'sgpstampE32.b1.20200101.000000.nc', + 'sgpstampE33.b1.20200101.000000.nc', + 'sgpstampE34.b1.20200101.000000.nc', + 'sgpstampE9.b1.20200101.000000.nc', +] EXAMPLE_STAMP_WILDCARD = [DATASETS.fetch(file) for file in stamp_wildcard_list] -mmcr_list = ['sgpmmcrC1.b1.1.cdf', - 'sgpmmcrC1.b1.2.cdf'] +mmcr_list = ['sgpmmcrC1.b1.1.cdf', 'sgpmmcrC1.b1.2.cdf'] EXAMPLE_MMCR = [DATASETS.fetch(file) for file in mmcr_list] diff --git a/act/utils/__init__.py b/act/utils/__init__.py index 035bf99de3..1f6c29544f 100644 --- a/act/utils/__init__.py +++ b/act/utils/__init__.py @@ -7,7 +7,16 @@ __getattr__, __dir__, __all__ = lazy.attach( __name__, - submodules=['data_utils', 'datetime_utils', 'geo_utils', 'inst_utils', 'io_utils', 'qc_utils', 'radiance_utils', 'ship_utils'], + submodules=[ + 'data_utils', + 'datetime_utils', + 'geo_utils', + 'inst_utils', + 'io_utils', + 'qc_utils', + 'radiance_utils', + 'ship_utils', + ], submod_attrs={ 'data_utils': [ 'ChangeUnits', @@ -31,7 +40,7 @@ 'numpy_to_arm_date', 'reduce_time_ranges', 'date_parser', - 'adjust_timestamp' + 'adjust_timestamp', ], 'geo_utils': [ 'add_solar_variable', @@ -44,13 +53,14 @@ 'qc_utils': ['calculate_dqr_times'], 'radiance_utils': ['planck_converter'], 'ship_utils': ['calc_cog_sog', 'proc_scog'], - 'io_utils': ['pack_tar', - 'unpack_tar', - 'cleanup_files', - 'is_gunzip_file', - 'pack_gzip', - 'unpack_gzip', - 'generate_movie' + 'io_utils': [ + 'pack_tar', + 'unpack_tar', + 'cleanup_files', + 'is_gunzip_file', + 'pack_gzip', + 'unpack_gzip', + 'generate_movie', ], }, ) diff --git a/act/utils/data_utils.py b/act/utils/data_utils.py index 184aac92ed..38853c2439 100644 --- a/act/utils/data_utils.py +++ b/act/utils/data_utils.py @@ -120,7 +120,7 @@ def change_units( # @xr.register_dataset_accessor('utils') -class DatastreamParserARM(object): +class DatastreamParserARM: ''' Class to parse ARM datastream names or filenames into its components. Will return None for each attribute if not extracted from the filename. 
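
A minimal sketch of the parser class being modernized here; the datastream name is hypothetical, and the expected pieces follow the site + class + facility + '.' + level convention visible in the properties below:

    from act.utils.data_utils import DatastreamParserARM

    # Hypothetical ARM datastream name: site 'sgp', class 'met',
    # facility 'E13', data level 'b1'.
    fn = DatastreamParserARM('sgpmetE13.b1')
    print(fn.site, fn.datastream_class, fn.facility, fn.level)
    print(fn.datastream)  # reassembled by the property shown in the hunk below
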
@@ -156,6 +156,7 @@ class DatastreamParserARM(object): ''' + def __init__(self, ds=''): ''' Constructor that initializes datastream data member and runs @@ -258,8 +259,7 @@ def datastream(self): ''' try: - return ''.join((self.__site, self.__class, self.__facility, '.', - self.__level)) + return ''.join((self.__site, self.__class, self.__facility, '.', self.__level)) except TypeError: return None @@ -315,8 +315,7 @@ def datastream_standard(self): ''' try: - return ''.join((self.site, self.datastream_class, self.facility, - '.', self.level)) + return ''.join((self.site, self.datastream_class, self.facility, '.', self.level)) except TypeError: return None @@ -999,7 +998,6 @@ def convert_to_potential_temp( temp_var_units=None, press_var_units=None, ): - """ Converts temperature to potential temperature. @@ -1269,9 +1267,7 @@ def arm_site_location_search(site_code='sgp', facility_code=None): "distinct_facility_code": { "terms": { "field": "facility_code.keyword", - "order": { - "_key": "asc" - }, + "order": {"_key": "asc"}, "size": 7000, }, "aggs": { @@ -1283,7 +1279,7 @@ def arm_site_location_search(site_code='sgp', facility_code=None): "facility_code", "location", ], - "size": 1 + "size": 1, }, }, }, @@ -1298,7 +1294,9 @@ def arm_site_location_search(site_code='sgp', facility_code=None): } # Uses requests to grab metadata from arm.gov. - response = requests.get('https://adc.arm.gov/elastic/metadata/_search', headers=headers, json=json_data) + response = requests.get( + 'https://adc.arm.gov/elastic/metadata/_search', headers=headers, json=json_data + ) # Loads the text to a dictionary response_dict = json.loads(response.text) @@ -1306,19 +1304,19 @@ def arm_site_location_search(site_code='sgp', facility_code=None): coord_dict = {} # Loop through each facility. for i in range(len(response_dict['aggregations']['distinct_facility_code']['buckets'])): - site_info = response_dict['aggregations']['distinct_facility_code']['buckets'][i]['hits']['hits']['hits'][0]['_source'] + site_info = response_dict['aggregations']['distinct_facility_code']['buckets'][i]['hits'][ + 'hits' + ]['hits'][0]['_source'] site = site_info['site_code'] facility = site_info['facility_code'] # Some sites do not contain coordinate information, return None if that is the case. if site_info['location'] is None: - coords = {'latitude': None, - 'longitude': None} + coords = {'latitude': None, 'longitude': None} else: lat, lon = site_info['location'].split(',') lat = float(lat) lon = float(lon) - coords = {'latitude': lat, - 'longitude': lon} + coords = {'latitude': lat, 'longitude': lon} coord_dict.setdefault(site + ' ' + facility, coords) return coord_dict diff --git a/act/utils/datetime_utils.py b/act/utils/datetime_utils.py index e2e890bf9e..403de1bc18 100644 --- a/act/utils/datetime_utils.py +++ b/act/utils/datetime_utils.py @@ -55,6 +55,7 @@ def numpy_to_arm_date(_date, returnTime=False): """ from dateutil.parser._parser import ParserError + try: date = pd.to_datetime(str(_date)) if returnTime is False: @@ -261,7 +262,10 @@ def adjust_timestamp(ds, time_bounds='time_bounds', align='left', offset=None): elif align == 'right': time_start = [np.datetime64(t[1]) for t in time_bounds] elif align == 'center': - time_start = [np.datetime64(t[0]) + (np.datetime64(t[0]) - np.datetime64(t[1])) / 2. 
for t in time_bounds] + time_start = [ + np.datetime64(t[0]) + (np.datetime64(t[1]) - np.datetime64(t[0])) / 2.0 + for t in time_bounds + ] else: raise ValueError('Align should be set to one of [left, right, center]') diff --git a/act/utils/geo_utils.py b/act/utils/geo_utils.py index 27c2dac85c..b7361751e9 100644 --- a/act/utils/geo_utils.py +++ b/act/utils/geo_utils.py @@ -4,13 +4,10 @@ """ -import re from datetime import datetime, timedelta, timezone from pathlib import Path -import dateutil.parser import numpy as np -import pandas as pd import pytz from skyfield import almanac from skyfield.api import load, load_file, wgs84 diff --git a/act/utils/io_utils.py b/act/utils/io_utils.py index 839b7b8ae9..9c0e0491d2 100644 --- a/act/utils/io_utils.py +++ b/act/utils/io_utils.py @@ -2,17 +2,15 @@ import tarfile from os import PathLike from shutil import rmtree -import random -import string import gzip import shutil import tempfile -import numpy as np import types try: import moviepy.video.io.ImageSequenceClip from moviepy.video.io.VideoFileClip import VideoFileClip + MOVIEPY_AVAILABLE = True except ImportError: MOVIEPY_AVAILABLE = False @@ -77,8 +75,9 @@ def pack_tar(filenames, write_filename=None, write_directory=None, remove=False) return str(write_filename) -def unpack_tar(tar_files, write_directory=None, temp_dir=False, randomize=True, - return_files=True, remove=False): +def unpack_tar( + tar_files, write_directory=None, temp_dir=False, randomize=True, return_files=True, remove=False +): """ Unpacks TAR file contents into provided base directory @@ -316,9 +315,7 @@ def generate_movie(images, write_filename=None, fps=10, **kwargs): """ if not MOVIEPY_AVAILABLE: - raise ImportError( - 'MoviePy needs to be installed on your system to make movies.' - ) + raise ImportError('MoviePy needs to be installed on your system to make movies.') # Set default movie name if write_filename is None: diff --git a/docs/source/blog_posts/2022/sail_campaign_arm_and_noaa.ipynb b/docs/source/blog_posts/2022/sail_campaign_arm_and_noaa.ipynb index ac8baad20b..dea412251c 100644 --- a/docs/source/blog_posts/2022/sail_campaign_arm_and_noaa.ipynb +++ b/docs/source/blog_posts/2022/sail_campaign_arm_and_noaa.ipynb @@ -76,9 +76,11 @@ "source": [ "# Download the NOAA KPS site files from 22:00 and 23:00\n", "result_22_kps = act.discovery.download_noaa_psl_data(\n", - " site='kps', instrument='Radar FMCW Moment', startdate='20220801', hour='22')\n", + " site='kps', instrument='Radar FMCW Moment', startdate='20220801', hour='22'\n", + ")\n", "result_23_kps = act.discovery.download_noaa_psl_data(\n", - " site='kps', instrument='Radar FMCW Moment', startdate='20220801', hour='23')" + " site='kps', instrument='Radar FMCW Moment', startdate='20220801', hour='23'\n", + ")" ] }, { @@ -94,8 +96,10 @@ "ds1_kps = act.io.noaapsl.read_psl_radar_fmcw_moment([result_22_kps[-1], result_23_kps[-1]])\n", "\n", "# Read in the parsivel files from NOAA's webpage.\n", - "url = ['https://downloads.psl.noaa.gov/psd2/data/realtime/DisdrometerParsivel/Stats/kps/2022/213/kps2221322_stats.txt',\n", - " 'https://downloads.psl.noaa.gov/psd2/data/realtime/DisdrometerParsivel/Stats/kps/2022/213/kps2221323_stats.txt']\n", + "url = [\n", + " 'https://downloads.psl.noaa.gov/psd2/data/realtime/DisdrometerParsivel/Stats/kps/2022/213/kps2221322_stats.txt',\n", + " 'https://downloads.psl.noaa.gov/psd2/data/realtime/DisdrometerParsivel/Stats/kps/2022/213/kps2221323_stats.txt',\n", + "]\n", "ds2_kps = act.io.noaapsl.read_psl_parsivel(url)" ] }, { @@ -124,13 +128,23
@@ "# Create display object with both datasets\n", "display = act.plotting.TimeSeriesDisplay(\n", " {\"NOAA Site KPS PSL Radar FMCW\": kps_ds1, \"NOAA Site KPS Parsivel\": kps_ds2},\n", - " subplot_shape=(2,), figsize=(10, 10))\n", + " subplot_shape=(2,),\n", + " figsize=(10, 10),\n", + ")\n", "\n", "# Plot the subplots\n", - "display.plot('reflectivity_uncalibrated', dsname='NOAA Site KPS PSL Radar FMCW',\n", - " cmap='act_HomeyerRainbow', subplot_index=(0,))\n", - "display.plot('number_density_drops', dsname='NOAA Site KPS Parsivel',\n", - " cmap='act_HomeyerRainbow', subplot_index=(1,))\n", + "display.plot(\n", + " 'reflectivity_uncalibrated',\n", + " dsname='NOAA Site KPS PSL Radar FMCW',\n", + " cmap='act_HomeyerRainbow',\n", + " subplot_index=(0,),\n", + ")\n", + "display.plot(\n", + " 'number_density_drops',\n", + " dsname='NOAA Site KPS Parsivel',\n", + " cmap='act_HomeyerRainbow',\n", + " subplot_index=(1,),\n", + ")\n", "# Set limits\n", "display.axes[1].set_ylim([0, 10])\n", "plt.show()" @@ -154,8 +168,8 @@ "# Use arm username and token to retrieve files.\n", "# This is commented out as the files have already been downloaded.\n", "\n", - "#token = 'arm_token'\n", - "#username = 'arm_username'" + "# token = 'arm_token'\n", + "# username = 'arm_username'" ] }, { @@ -165,16 +179,16 @@ "metadata": {}, "outputs": [], "source": [ - "#Specify datastream and date range for KAZR data\n", + "# Specify datastream and date range for KAZR data\n", "ds_kazr = 'guckazrcfrgeM1.a1'\n", "startdate = '2022-08-01'\n", "enddate = '2022-08-01'\n", "\n", "# Data already retrieved, but showing code below on how to download the files.\n", - "#act.discovery.download_data(username, token, ds_kazr, startdate, enddate)\n", + "# act.discovery.download_data(username, token, ds_kazr, startdate, enddate)\n", "\n", "# Index last 2 files for the 22:00 and 23:00 timeframe.\n", - "kazr_files = glob.glob(''.join(['./',ds_kazr,'/*nc']))\n", + "kazr_files = glob.glob(''.join(['./', ds_kazr, '/*nc']))\n", "kazr_files[-2:]\n", "kazr_ds = act.io.arm.read_arm_netcdf(kazr_files[-2:])" ] @@ -186,16 +200,16 @@ "metadata": {}, "outputs": [], "source": [ - "#Specify datastream and date range for KAZR data\n", + "# Specify datastream and date range for KAZR data\n", "ds_ld = 'gucldM1.b1'\n", "startdate = '2022-08-01'\n", "enddate = '2022-08-01'\n", "\n", "# Data already retrieved, but showing code below on how to download the files.\n", - "#act.discovery.download_data(username, token, ds_ld, startdate, enddate)\n", + "# act.discovery.download_data(username, token, ds_ld, startdate, enddate)\n", "\n", "# Index last 2 files for the 22:00 and 23:00 timeframe.\n", - "ld_files = glob.glob(''.join(['./',ds_ld,'/*cdf']))\n", + "ld_files = glob.glob(''.join(['./', ds_ld, '/*cdf']))\n", "ld_ds = act.io.arm.read_arm_netcdf(ld_files[0])" ] }, @@ -233,22 +247,39 @@ "\n", "# Create a series display with all 4 datasets\n", "display = act.plotting.TimeSeriesDisplay(\n", - " {\"NOAA KPS PSL Radar FMCW\": kps_ds1, \"NOAA KPS Parsivel\": kps_ds2,\n", - " \"guckazrcfrgeM1.a1\": kazr_ds, 'gucldM1.b1': ld_ds},\n", - " subplot_shape=(2, 2), figsize=(22, 12))\n", + " {\n", + " \"NOAA KPS PSL Radar FMCW\": kps_ds1,\n", + " \"NOAA KPS Parsivel\": kps_ds2,\n", + " \"guckazrcfrgeM1.a1\": kazr_ds,\n", + " 'gucldM1.b1': ld_ds,\n", + " },\n", + " subplot_shape=(2, 2),\n", + " figsize=(22, 12),\n", + ")\n", "\n", "# Set custom 2 line title for space\n", "title = \"NOAA KPS PSL Radar FMCW\\n reflectivity_uncalibrated on 20220801\"\n", "\n", "# Plot the four 
subplots\n", - "display.plot('reflectivity_uncalibrated', dsname='NOAA KPS PSL Radar FMCW',\n", - " cmap='act_HomeyerRainbow', set_title=title, subplot_index=(0, 1))\n", - "display.plot('number_density_drops', dsname='NOAA KPS Parsivel',\n", - " cmap='act_HomeyerRainbow', subplot_index=(1, 1))\n", - "display.plot('reflectivity', dsname='guckazrcfrgeM1.a1',\n", - " cmap='act_HomeyerRainbow', subplot_index=(0, 0))\n", - "display.plot('number_density_drops', dsname='gucldM1.b1',\n", - " cmap='act_HomeyerRainbow', subplot_index=(1, 0))\n", + "display.plot(\n", + " 'reflectivity_uncalibrated',\n", + " dsname='NOAA KPS PSL Radar FMCW',\n", + " cmap='act_HomeyerRainbow',\n", + " set_title=title,\n", + " subplot_index=(0, 1),\n", + ")\n", + "display.plot(\n", + " 'number_density_drops',\n", + " dsname='NOAA KPS Parsivel',\n", + " cmap='act_HomeyerRainbow',\n", + " subplot_index=(1, 1),\n", + ")\n", + "display.plot(\n", + " 'reflectivity', dsname='guckazrcfrgeM1.a1', cmap='act_HomeyerRainbow', subplot_index=(0, 0)\n", + ")\n", + "display.plot(\n", + " 'number_density_drops', dsname='gucldM1.b1', cmap='act_HomeyerRainbow', subplot_index=(1, 0)\n", + ")\n", "\n", "# Update limits\n", "display.axes[1, 0].set_ylim([0, 10])\n", @@ -257,11 +288,11 @@ "\n", "\n", "display.axes[0, 0].set_ylim([0, 10000])\n", - "display.axes[0, 0].set_yticklabels(['0', '2', '4','6', '8', '10'])\n", + "display.axes[0, 0].set_yticklabels(['0', '2', '4', '6', '8', '10'])\n", "display.axes[0, 0].set_ylabel('km')\n", "\n", "display.axes[0, 1].set_ylim([0, 10000])\n", - "display.axes[0, 1].set_yticklabels(['0', '2', '4','6', '8', '10'])\n", + "display.axes[0, 1].set_yticklabels(['0', '2', '4', '6', '8', '10'])\n", "display.axes[0, 1].set_ylabel('km')\n", "\n", "plt.show()" @@ -282,13 +313,13 @@ "metadata": {}, "outputs": [], "source": [ - "#Specify datastream and date range for KAZR data\n", + "# Specify datastream and date range for KAZR data\n", "ds_dl = 'gucdlppiM1.b1'\n", "startdate = '2022-08-01'\n", "enddate = '2022-08-01'\n", "\n", - "#act.discovery.download_data(username, token, ds_dl, startdate, enddate)\n", - "dl_ppi_files = glob.glob(''.join(['./',ds_dl,'/*cdf']))" + "# act.discovery.download_data(username, token, ds_dl, startdate, enddate)\n", + "dl_ppi_files = glob.glob(''.join(['./', ds_dl, '/*cdf']))" ] }, { @@ -306,7 +337,8 @@ " ds = act.io.arm.read_arm_netcdf(file)\n", " # Calculate the winds for each gucdlppi dataset.\n", " wind_ds = act.retrievals.compute_winds_from_ppi(\n", - " ds, remove_all_missing=True, snr_threshold=0.008)\n", + " ds, remove_all_missing=True, snr_threshold=0.008\n", + " )\n", " multi_ds.append(wind_ds)\n", "\n", "wind_ds = xr.merge(multi_ds)" @@ -332,15 +364,20 @@ "source": [ "# Create a display object.\n", "display = act.plotting.TimeSeriesDisplay(\n", - " {\"GUC DLPPI Computed Winds over KAZR\": wind_ds,\n", - " \"guckazrcfrgeM1.a1\": kazr_ds,}, figsize=(20, 10))\n", + " {\n", + " \"GUC DLPPI Computed Winds over KAZR\": wind_ds,\n", + " \"guckazrcfrgeM1.a1\": kazr_ds,\n", + " },\n", + " figsize=(20, 10),\n", + ")\n", "\n", "# Plot the wind barbs overlayed on the KAZR reflectivity\n", - "display.plot('reflectivity', dsname='guckazrcfrgeM1.a1',\n", - " cmap='act_HomeyerRainbow', vmin=-20, vmax=30)\n", - "display.plot_barbs_from_spd_dir('wind_speed', 'wind_direction',\n", - " dsname='GUC DLPPI Computed Winds over KAZR',\n", - " invert_y_axis=False)\n", + "display.plot(\n", + " 'reflectivity', dsname='guckazrcfrgeM1.a1', cmap='act_HomeyerRainbow', vmin=-20, vmax=30\n", + ")\n", + 
"display.plot_barbs_from_spd_dir(\n", + " 'wind_speed', 'wind_direction', dsname='GUC DLPPI Computed Winds over KAZR', invert_y_axis=False\n", + ")\n", "\n", "# Update the x-limits to make sure both wind profiles are shown\n", "# Update the y-limits to show plotted winds\n", diff --git a/examples/discovery/plot_neon.py b/examples/discovery/plot_neon.py index 038fa62778..293e0d70b1 100644 --- a/examples/discovery/plot_neon.py +++ b/examples/discovery/plot_neon.py @@ -11,7 +11,6 @@ import os import glob import matplotlib.pyplot as plt -import numpy as np import act @@ -21,7 +20,9 @@ if token is not None and len(token) > 0: # Download ARM data if a username/token are set - files = act.discovery.download_arm_data(username, token, 'nsametC1.b1', '2022-10-01', '2022-10-07') + files = act.discovery.download_arm_data( + username, token, 'nsametC1.b1', '2022-10-01', '2022-10-07' + ) ds = act.io.arm.read_arm_netcdf(files) # Download NEON Data @@ -33,19 +34,23 @@ # A number of files are downloaded and further explained in the readme file that's downloaded. # These are the files we will need for reading 1 minute NEON data - file = glob.glob(os.path.join( - '.', - 'BARR_DP1.00002.001', - 'NEON.D18.BARR.DP1.00002.001.000.010.001.SAAT_1min.2022-10.expanded.*.csv', - )) - variable_file = glob.glob(os.path.join( - '.', 'BARR_DP1.00002.001', 'NEON.D18.BARR.DP1.00002.001.variables.*.csv' - )) - position_file = glob.glob(os.path.join( - '.', - 'BARR_DP1.00002.001', - 'NEON.D18.BARR.DP1.00002.001.sensor_positions.*.csv', - )) + file = glob.glob( + os.path.join( + '.', + 'BARR_DP1.00002.001', + 'NEON.D18.BARR.DP1.00002.001.000.010.001.SAAT_1min.2022-10.expanded.*.csv', + ) + ) + variable_file = glob.glob( + os.path.join('.', 'BARR_DP1.00002.001', 'NEON.D18.BARR.DP1.00002.001.variables.*.csv') + ) + position_file = glob.glob( + os.path.join( + '.', + 'BARR_DP1.00002.001', + 'NEON.D18.BARR.DP1.00002.001.sensor_positions.*.csv', + ) + ) # Read in the data using the ACT reader, passing with it the variable and position files # for added information in the dataset ds2 = act.io.read_neon_csv(file, variable_files=variable_file, position_files=position_file) diff --git a/examples/io/plot_create_arm_ds.py b/examples/io/plot_create_arm_ds.py index 9840680405..fc2fb1332d 100644 --- a/examples/io/plot_create_arm_ds.py +++ b/examples/io/plot_create_arm_ds.py @@ -37,7 +37,7 @@ 'command_line': 'python plot_create_arm_ds.py', 'process_version': '1.2.3', 'history': 'Processed with Jupyter Workbench', - 'random': '1234253sdgfadf' + 'random': '1234253sdgfadf', } for a in atts: if a in ds.attrs: diff --git a/examples/io/plot_icartt.py b/examples/io/plot_icartt.py index 4e7bc22bc9..df76f95c24 100644 --- a/examples/io/plot_icartt.py +++ b/examples/io/plot_icartt.py @@ -11,7 +11,6 @@ from arm_test_data import DATASETS import matplotlib.pyplot as plt -import numpy as np import act from act.io.icartt import read_icartt diff --git a/examples/io/plot_sodar.py b/examples/io/plot_sodar.py index 0b6e0898fd..c22a0c15a1 100644 --- a/examples/io/plot_sodar.py +++ b/examples/io/plot_sodar.py @@ -23,8 +23,8 @@ # Create an ACT TimeSeriesDisplay. display = act.plotting.TimeSeriesDisplay( - {'Shear, Wind Direction, and Speed at ANL ATMOS': ds}, - subplot_shape=(1,), figsize=(15, 5)) + {'Shear, Wind Direction, and Speed at ANL ATMOS': ds}, subplot_shape=(1,), figsize=(15, 5) +) # Plot shear with a wind barb overlay, while using a color vision # deficiency (CVD) colormap. 
diff --git a/examples/io/plot_surfrad.py b/examples/io/plot_surfrad.py index 473185f24d..538700cc5d 100644 --- a/examples/io/plot_surfrad.py +++ b/examples/io/plot_surfrad.py @@ -19,7 +19,7 @@ # But it's easy enough to read from the URLs as well url = [ 'https://gml.noaa.gov/aftp/data/radiation/surfrad/Boulder_CO/2023/tbl23008.dat', - 'https://gml.noaa.gov/aftp/data/radiation/surfrad/Boulder_CO/2023/tbl23009.dat' + 'https://gml.noaa.gov/aftp/data/radiation/surfrad/Boulder_CO/2023/tbl23009.dat', ] ds = act.io.read_surfrad(url) diff --git a/examples/plotting/plot_ceil.py b/examples/plotting/plot_ceil.py index b24f810203..2fe687f2d4 100644 --- a/examples/plotting/plot_ceil.py +++ b/examples/plotting/plot_ceil.py @@ -25,7 +25,9 @@ ceil_ds = act.io.arm.read_arm_netcdf(filename_ceil, engine='netcdf4') else: # Example to show how easy it is to download ARM data if a username/token are set - results = act.discovery.download_arm_data(username, token, 'sgpceilC1.b1', '2022-01-14', '2022-01-19') + results = act.discovery.download_arm_data( + username, token, 'sgpceilC1.b1', '2022-01-14', '2022-01-19' + ) ceil_ds = act.io.arm.read_arm_netcdf(results) # Adjust ceilometer data for plotting diff --git a/examples/plotting/plot_contour.py b/examples/plotting/plot_contour.py index fba82cecfd..7a63750a01 100644 --- a/examples/plotting/plot_contour.py +++ b/examples/plotting/plot_contour.py @@ -9,26 +9,27 @@ """ -import glob from arm_test_data import DATASETS import matplotlib.pyplot as plt import act -met_contour_list = ['sgpmetE15.b1.20190508.000000.cdf', - 'sgpmetE31.b1.20190508.000000.cdf', - 'sgpmetE32.b1.20190508.000000.cdf', - 'sgpmetE33.b1.20190508.000000.cdf', - 'sgpmetE34.b1.20190508.000000.cdf', - 'sgpmetE35.b1.20190508.000000.cdf', - 'sgpmetE36.b1.20190508.000000.cdf', - 'sgpmetE37.b1.20190508.000000.cdf', - 'sgpmetE38.b1.20190508.000000.cdf', - 'sgpmetE39.b1.20190508.000000.cdf', - 'sgpmetE40.b1.20190508.000000.cdf', - 'sgpmetE9.b1.20190508.000000.cdf', - 'sgpmetE13.b1.20190508.000000.cdf'] +met_contour_list = [ + 'sgpmetE15.b1.20190508.000000.cdf', + 'sgpmetE31.b1.20190508.000000.cdf', + 'sgpmetE32.b1.20190508.000000.cdf', + 'sgpmetE33.b1.20190508.000000.cdf', + 'sgpmetE34.b1.20190508.000000.cdf', + 'sgpmetE35.b1.20190508.000000.cdf', + 'sgpmetE36.b1.20190508.000000.cdf', + 'sgpmetE37.b1.20190508.000000.cdf', + 'sgpmetE38.b1.20190508.000000.cdf', + 'sgpmetE39.b1.20190508.000000.cdf', + 'sgpmetE40.b1.20190508.000000.cdf', + 'sgpmetE9.b1.20190508.000000.cdf', + 'sgpmetE13.b1.20190508.000000.cdf', +] met_contour_filenames = [DATASETS.fetch(file) for file in met_contour_list] diff --git a/examples/plotting/plot_data_rose.py b/examples/plotting/plot_data_rose.py index 16166d83ee..a0f6361091 100644 --- a/examples/plotting/plot_data_rose.py +++ b/examples/plotting/plot_data_rose.py @@ -14,19 +14,20 @@ """ from arm_test_data import DATASETS -import numpy as np from matplotlib import pyplot as plt import act # Read in some data with wind speed/direction in the file -met_wildcard_list = ['sgpmetE13.b1.20190101.000000.cdf', - 'sgpmetE13.b1.20190102.000000.cdf', - 'sgpmetE13.b1.20190103.000000.cdf', - 'sgpmetE13.b1.20190104.000000.cdf', - 'sgpmetE13.b1.20190105.000000.cdf', - 'sgpmetE13.b1.20190106.000000.cdf', - 'sgpmetE13.b1.20190107.000000.cdf'] +met_wildcard_list = [ + 'sgpmetE13.b1.20190101.000000.cdf', + 'sgpmetE13.b1.20190102.000000.cdf', + 'sgpmetE13.b1.20190103.000000.cdf', + 'sgpmetE13.b1.20190104.000000.cdf', + 'sgpmetE13.b1.20190105.000000.cdf', + 'sgpmetE13.b1.20190106.000000.cdf', +
'sgpmetE13.b1.20190107.000000.cdf', +] met_filenames = [DATASETS.fetch(file) for file in met_wildcard_list] ds = act.io.arm.read_arm_netcdf(met_filenames) diff --git a/examples/plotting/plot_days.py b/examples/plotting/plot_days.py index 70af4b7c1f..bc7b790f33 100644 --- a/examples/plotting/plot_days.py +++ b/examples/plotting/plot_days.py @@ -10,26 +10,34 @@ from arm_test_data import DATASETS import matplotlib.pyplot as plt -import numpy as np import act # Read in the sample MET data -met_wildcard_list = ['sgpmetE13.b1.20190101.000000.cdf', - 'sgpmetE13.b1.20190102.000000.cdf', - 'sgpmetE13.b1.20190103.000000.cdf', - 'sgpmetE13.b1.20190104.000000.cdf', - 'sgpmetE13.b1.20190105.000000.cdf', - 'sgpmetE13.b1.20190106.000000.cdf', - 'sgpmetE13.b1.20190107.000000.cdf'] +met_wildcard_list = [ + 'sgpmetE13.b1.20190101.000000.cdf', + 'sgpmetE13.b1.20190102.000000.cdf', + 'sgpmetE13.b1.20190103.000000.cdf', + 'sgpmetE13.b1.20190104.000000.cdf', + 'sgpmetE13.b1.20190105.000000.cdf', + 'sgpmetE13.b1.20190106.000000.cdf', + 'sgpmetE13.b1.20190107.000000.cdf', +] met_filenames = [DATASETS.fetch(file) for file in met_wildcard_list] ds = act.io.arm.read_arm_netcdf(met_filenames) # Create Plot Display display = act.plotting.WindRoseDisplay(ds, figsize=(15, 15), subplot_shape=(3, 3)) groupby = display.group_by('day') -groupby.plot_group('plot_data', None, dir_field='wdir_vec_mean', spd_field='wspd_vec_mean', - data_field='temp_mean', num_dirs=12, plot_type='line') +groupby.plot_group( + 'plot_data', + None, + dir_field='wdir_vec_mean', + spd_field='wspd_vec_mean', + data_field='temp_mean', + num_dirs=12, + plot_type='line', +) # Set theta tick markers for each axis inside display to be inside the polar axes for i in range(3): diff --git a/examples/plotting/plot_daytime_averages.py b/examples/plotting/plot_daytime_averages.py index 117bf29a5e..6beff17c1b 100644 --- a/examples/plotting/plot_daytime_averages.py +++ b/examples/plotting/plot_daytime_averages.py @@ -14,13 +14,15 @@ import act # Read in the sample MET data -met_wildcard_list = ['sgpmetE13.b1.20190101.000000.cdf', - 'sgpmetE13.b1.20190102.000000.cdf', - 'sgpmetE13.b1.20190103.000000.cdf', - 'sgpmetE13.b1.20190104.000000.cdf', - 'sgpmetE13.b1.20190105.000000.cdf', - 'sgpmetE13.b1.20190106.000000.cdf', - 'sgpmetE13.b1.20190107.000000.cdf'] +met_wildcard_list = [ + 'sgpmetE13.b1.20190101.000000.cdf', + 'sgpmetE13.b1.20190102.000000.cdf', + 'sgpmetE13.b1.20190103.000000.cdf', + 'sgpmetE13.b1.20190104.000000.cdf', + 'sgpmetE13.b1.20190105.000000.cdf', + 'sgpmetE13.b1.20190106.000000.cdf', + 'sgpmetE13.b1.20190107.000000.cdf', +] met_filenames = [DATASETS.fetch(file) for file in met_wildcard_list] ds = act.io.arm.read_arm_netcdf(met_filenames) diff --git a/examples/plotting/plot_enhanced_skewt.py b/examples/plotting/plot_enhanced_skewt.py index 5223c6c3e4..f3258d5b64 100644 --- a/examples/plotting/plot_enhanced_skewt.py +++ b/examples/plotting/plot_enhanced_skewt.py @@ -9,13 +9,9 @@ """ -import glob from arm_test_data import DATASETS from matplotlib import pyplot as plt -import metpy -import numpy as np -import xarray as xr import act diff --git a/examples/plotting/plot_examples.py b/examples/plotting/plot_examples.py index 96a8fe21e6..c9cee8e55b 100644 --- a/examples/plotting/plot_examples.py +++ b/examples/plotting/plot_examples.py @@ -9,7 +9,6 @@ from arm_test_data import DATASETS import matplotlib.pyplot as plt -import xarray as xr import act diff --git a/examples/plotting/plot_heatmap.py b/examples/plotting/plot_heatmap.py index 
86238424c8..9e15a1ceab 100644 --- a/examples/plotting/plot_heatmap.py +++ b/examples/plotting/plot_heatmap.py @@ -15,13 +15,15 @@ import act # Read MET data in from the test data area -met_wildcard_list = ['sgpmetE13.b1.20190101.000000.cdf', - 'sgpmetE13.b1.20190102.000000.cdf', - 'sgpmetE13.b1.20190103.000000.cdf', - 'sgpmetE13.b1.20190104.000000.cdf', - 'sgpmetE13.b1.20190105.000000.cdf', - 'sgpmetE13.b1.20190106.000000.cdf', - 'sgpmetE13.b1.20190107.000000.cdf'] +met_wildcard_list = [ + 'sgpmetE13.b1.20190101.000000.cdf', + 'sgpmetE13.b1.20190102.000000.cdf', + 'sgpmetE13.b1.20190103.000000.cdf', + 'sgpmetE13.b1.20190104.000000.cdf', + 'sgpmetE13.b1.20190105.000000.cdf', + 'sgpmetE13.b1.20190106.000000.cdf', + 'sgpmetE13.b1.20190107.000000.cdf', +] met_filenames = [DATASETS.fetch(file) for file in met_wildcard_list] ds = act.io.arm.read_arm_netcdf(met_filenames) @@ -31,8 +33,9 @@ # Plot a heatmap and scatter plot up of RH vs Temperature # Set the number of bins for the x-axis to 25 and y to 20 title = 'Heatmap of MET RH vs Temp' -display.plot_heatmap('temp_mean', 'rh_mean', x_bins=25, y_bins=20, - threshold=0, subplot_index=(0, 0), set_title=title) +display.plot_heatmap( + 'temp_mean', 'rh_mean', x_bins=25, y_bins=20, threshold=0, subplot_index=(0, 0), set_title=title +) # Plot the scatter plot and shade by wind_speed title = 'Scatter plot of MET RH vs Temp' diff --git a/examples/plotting/plot_hist_kwargs.py b/examples/plotting/plot_hist_kwargs.py index 063e19d280..9c153233d5 100644 --- a/examples/plotting/plot_hist_kwargs.py +++ b/examples/plotting/plot_hist_kwargs.py @@ -20,6 +20,5 @@ # Plot data hist_kwargs = {'range': (-10, 10)} histdisplay = act.plotting.DistributionDisplay(met_ds) -histdisplay.plot_stacked_bar('temp_mean', bins=np.arange(-40, 40, 5), - hist_kwargs=hist_kwargs) +histdisplay.plot_stacked_bar('temp_mean', bins=np.arange(-40, 40, 5), hist_kwargs=hist_kwargs) plt.show() diff --git a/examples/plotting/plot_multiple_column.py b/examples/plotting/plot_multiple_column.py index 1836277b3e..066f52d6e8 100644 --- a/examples/plotting/plot_multiple_column.py +++ b/examples/plotting/plot_multiple_column.py @@ -14,13 +14,15 @@ import act # Read in MET files. 
-met_wildcard_list = ['sgpmetE13.b1.20190101.000000.cdf', - 'sgpmetE13.b1.20190102.000000.cdf', - 'sgpmetE13.b1.20190103.000000.cdf', - 'sgpmetE13.b1.20190104.000000.cdf', - 'sgpmetE13.b1.20190105.000000.cdf', - 'sgpmetE13.b1.20190106.000000.cdf', - 'sgpmetE13.b1.20190107.000000.cdf'] +met_wildcard_list = [ + 'sgpmetE13.b1.20190101.000000.cdf', + 'sgpmetE13.b1.20190102.000000.cdf', + 'sgpmetE13.b1.20190103.000000.cdf', + 'sgpmetE13.b1.20190104.000000.cdf', + 'sgpmetE13.b1.20190105.000000.cdf', + 'sgpmetE13.b1.20190106.000000.cdf', + 'sgpmetE13.b1.20190107.000000.cdf', +] met_filenames = [DATASETS.fetch(file) for file in met_wildcard_list] met_ds = act.io.arm.read_arm_netcdf(met_filenames) diff --git a/examples/plotting/plot_presentweathercode.py b/examples/plotting/plot_presentweathercode.py index dbec1e23d2..c30d72ed1f 100644 --- a/examples/plotting/plot_presentweathercode.py +++ b/examples/plotting/plot_presentweathercode.py @@ -9,7 +9,6 @@ """ from arm_test_data import DATASETS -import numpy as np from matplotlib.dates import DateFormatter from matplotlib.dates import num2date import matplotlib.pyplot as plt @@ -22,12 +21,12 @@ # Decode the Present Weather Codes # Pass it to the function to decode it along with the variable name -ds = act.utils.inst_utils.decode_present_weather(ds, - variable='pwd_pw_code_inst') +ds = act.utils.inst_utils.decode_present_weather(ds, variable='pwd_pw_code_inst') # Calculate Precipitation Accumulation -pre_accum = act.utils.accumulate_precip(ds.where(ds.qc_tbrg_precip_total == 0), - "tbrg_precip_total").tbrg_precip_total_accumulated.compute() +pre_accum = act.utils.accumulate_precip( + ds.where(ds.qc_tbrg_precip_total == 0), "tbrg_precip_total" +).tbrg_precip_total_accumulated.compute() # Add the Precipitation Accum to the MET DataSet ds['tbrg_accum'] = pre_accum @@ -46,8 +45,7 @@ # Assign the ACT display object to the matplotlib figure subplot display.assign_to_figure_axis(fig, ax) # Datastream Names are needed for plotting! 
-display.plot('tbrg_accum', - label='TBRG Accumualated Precip') +display.plot('tbrg_accum', label='TBRG Accumulated Precip') # Add a day/night background display.day_night_background() @@ -68,7 +66,10 @@ ndates = [num2date(x) for x in xticks] # Grab the PWD codes associated with those ticks -ncode = [ds['pwd_pw_code_inst_decoded'].sel(time=x.replace(tzinfo=None), method='nearest').data.tolist() for x in ndates] +ncode = [ + ds['pwd_pw_code_inst_decoded'].sel(time=x.replace(tzinfo=None), method='nearest').data.tolist() + for x in ndates +] pwd_code = ['\n'.join(x.split(' ')) if len(x) > 20 else x for x in ncode] # Display these select PWD codes as vertical texts along the x-axis @@ -77,11 +78,7 @@ # Plot the PWD code for i, key in enumerate(xticks): - ax.text(key, - ymin, - pwd_code[i], - rotation=90, - va='center') + ax.text(key, ymin, pwd_code[i], rotation=90, va='center') plt.subplots_adjust(bottom=0.20) diff --git a/examples/plotting/plot_rh_timeseries.py b/examples/plotting/plot_rh_timeseries.py index b4c7d65a2d..8dacfb4806 100644 --- a/examples/plotting/plot_rh_timeseries.py +++ b/examples/plotting/plot_rh_timeseries.py @@ -13,30 +13,32 @@ import act # Read in sonde files -twp_sonde_wildcard_list = ['twpsondewnpnC3.b1.20060119.050300.custom.cdf', - 'twpsondewnpnC3.b1.20060119.112000.custom.cdf', - 'twpsondewnpnC3.b1.20060119.163300.custom.cdf', - 'twpsondewnpnC3.b1.20060119.231600.custom.cdf', - 'twpsondewnpnC3.b1.20060120.043800.custom.cdf', - 'twpsondewnpnC3.b1.20060120.111900.custom.cdf', - 'twpsondewnpnC3.b1.20060120.170800.custom.cdf', - 'twpsondewnpnC3.b1.20060120.231500.custom.cdf', - 'twpsondewnpnC3.b1.20060121.051500.custom.cdf', - 'twpsondewnpnC3.b1.20060121.111600.custom.cdf', - 'twpsondewnpnC3.b1.20060121.171600.custom.cdf', - 'twpsondewnpnC3.b1.20060121.231600.custom.cdf', - 'twpsondewnpnC3.b1.20060122.052600.custom.cdf', - 'twpsondewnpnC3.b1.20060122.111500.custom.cdf', - 'twpsondewnpnC3.b1.20060122.171800.custom.cdf', - 'twpsondewnpnC3.b1.20060122.232600.custom.cdf', - 'twpsondewnpnC3.b1.20060123.052500.custom.cdf', - 'twpsondewnpnC3.b1.20060123.111700.custom.cdf', - 'twpsondewnpnC3.b1.20060123.171600.custom.cdf', - 'twpsondewnpnC3.b1.20060123.231500.custom.cdf', - 'twpsondewnpnC3.b1.20060124.051500.custom.cdf', - 'twpsondewnpnC3.b1.20060124.111800.custom.cdf', - 'twpsondewnpnC3.b1.20060124.171700.custom.cdf', - 'twpsondewnpnC3.b1.20060124.231500.custom.cdf'] +twp_sonde_wildcard_list = [ + 'twpsondewnpnC3.b1.20060119.050300.custom.cdf', + 'twpsondewnpnC3.b1.20060119.112000.custom.cdf', + 'twpsondewnpnC3.b1.20060119.163300.custom.cdf', + 'twpsondewnpnC3.b1.20060119.231600.custom.cdf', + 'twpsondewnpnC3.b1.20060120.043800.custom.cdf', + 'twpsondewnpnC3.b1.20060120.111900.custom.cdf', + 'twpsondewnpnC3.b1.20060120.170800.custom.cdf', + 'twpsondewnpnC3.b1.20060120.231500.custom.cdf', + 'twpsondewnpnC3.b1.20060121.051500.custom.cdf', + 'twpsondewnpnC3.b1.20060121.111600.custom.cdf', + 'twpsondewnpnC3.b1.20060121.171600.custom.cdf', + 'twpsondewnpnC3.b1.20060121.231600.custom.cdf', + 'twpsondewnpnC3.b1.20060122.052600.custom.cdf', + 'twpsondewnpnC3.b1.20060122.111500.custom.cdf', + 'twpsondewnpnC3.b1.20060122.171800.custom.cdf', + 'twpsondewnpnC3.b1.20060122.232600.custom.cdf', + 'twpsondewnpnC3.b1.20060123.052500.custom.cdf', + 'twpsondewnpnC3.b1.20060123.111700.custom.cdf', + 'twpsondewnpnC3.b1.20060123.171600.custom.cdf', + 'twpsondewnpnC3.b1.20060123.231500.custom.cdf', + 'twpsondewnpnC3.b1.20060124.051500.custom.cdf', + 'twpsondewnpnC3.b1.20060124.111800.custom.cdf', +
'twpsondewnpnC3.b1.20060124.171700.custom.cdf', + 'twpsondewnpnC3.b1.20060124.231500.custom.cdf', +] sonde_filenames = [DATASETS.fetch(file) for file in twp_sonde_wildcard_list] sonde_ds = act.io.arm.read_arm_netcdf(sonde_filenames) diff --git a/examples/plotting/plot_scatter.py b/examples/plotting/plot_scatter.py index 698f47373f..5a32477e67 100644 --- a/examples/plotting/plot_scatter.py +++ b/examples/plotting/plot_scatter.py @@ -27,57 +27,36 @@ display = act.plotting.DistributionDisplay(ds) # Compare aircraft ground speed with indicated airspeed -display.plot_scatter('true_airspeed', - 'ground_speed', - m_field='ambient_temp', - marker='x', - cbar_label='Ambient Temperature ($^\circ$C)' # noqa W605 - ) +display.plot_scatter( + 'true_airspeed', + 'ground_speed', + m_field='ambient_temp', + marker='x', + cbar_label=r'Ambient Temperature ($^\circ$C)', # noqa W605 +) # Set the range of the field on the x-axis display.set_xrng((40, 140)) display.set_yrng((40, 140)) # Determine the best fit line -z = np.ma.polyfit(ds['true_airspeed'], - ds['ground_speed'], - 1 - ) +z = np.ma.polyfit(ds['true_airspeed'], ds['ground_speed'], 1) p = np.poly1d(z) # Plot the best fit line -display.axes[0].plot(ds['true_airspeed'], - p(ds['true_airspeed']), - 'r', - linewidth=2 - ) +display.axes[0].plot(ds['true_airspeed'], p(ds['true_airspeed']), 'r', linewidth=2) # Display the line equation -display.axes[0].text(45, - 135, - "y = %.3fx + (%.3f)" % (z[0], z[1]), - color='r', - fontsize=12 - ) +display.axes[0].text(45, 135, "y = {:.3f}x + ({:.3f})".format(z[0], z[1]), color='r', fontsize=12) # Calculate Pearson Correlation Coefficient -cc_conc = pearsonr(ds['true_airspeed'], - ds['ground_speed'] - ) +cc_conc = pearsonr(ds['true_airspeed'], ds['ground_speed']) # Display the Pearson CC -display.axes[0].text(45, - 130, - "Pearson CC: %.2f" % (cc_conc[0]), - fontsize=12 - ) +display.axes[0].text(45, 130, "Pearson CC: %.2f" % (cc_conc[0]), fontsize=12) # Display the total number of samples -display.axes[0].text(45, - 125, - "N = %.0f" % (ds['true_airspeed'].data.shape[0]), - fontsize=12 - ) +display.axes[0].text(45, 125, "N = %.0f" % (ds['true_airspeed'].data.shape[0]), fontsize=12) # Display the 1:1 ratio line display.set_ratio_line() diff --git a/examples/plotting/plot_secondary_y.py b/examples/plotting/plot_secondary_y.py index d6062857e9..a7193711eb 100644 --- a/examples/plotting/plot_secondary_y.py +++ b/examples/plotting/plot_secondary_y.py @@ -9,7 +9,6 @@ from arm_test_data import DATASETS import matplotlib.pyplot as plt -import xarray as xr import act diff --git a/examples/plotting/plot_skewt.py b/examples/plotting/plot_skewt.py index 69c3674da8..924a951623 100644 --- a/examples/plotting/plot_skewt.py +++ b/examples/plotting/plot_skewt.py @@ -8,7 +8,6 @@ """ from arm_test_data import DATASETS -import metpy import xarray as xr from matplotlib import pyplot as plt @@ -37,7 +36,15 @@ plt.show() # One could also add options like adiabats and mixing lines skewt = act.plotting.SkewTDisplay(sonde_ds, figsize=(15, 10)) -skewt.plot_from_u_and_v('u_wind', 'v_wind', 'pres', 'tdry', 'dp', plot_dry_adiabats=True, - plot_moist_adiabats=True, plot_mixing_lines=True) +skewt.plot_from_u_and_v( + 'u_wind', + 'v_wind', + 'pres', + 'tdry', + 'dp', + plot_dry_adiabats=True, + plot_moist_adiabats=True, + plot_mixing_lines=True, +) plt.show() sonde_ds.close() diff --git a/examples/plotting/plot_skewt_with_text.py b/examples/plotting/plot_skewt_with_text.py index d37bd8d483..5672ccfd91 100644 --- 
a/examples/plotting/plot_skewt_with_text.py +++ b/examples/plotting/plot_skewt_with_text.py @@ -11,7 +11,6 @@ from arm_test_data import DATASETS from matplotlib import pyplot as plt -import metpy import numpy as np import xarray as xr diff --git a/examples/plotting/plot_time_height_scatter.py b/examples/plotting/plot_time_height_scatter.py index 950b36d998..71b5df4196 100644 --- a/examples/plotting/plot_time_height_scatter.py +++ b/examples/plotting/plot_time_height_scatter.py @@ -6,8 +6,6 @@ """ -import os -from arm_test_data import DATASETS import matplotlib.pyplot as plt import act from act.tests import sample_files diff --git a/examples/plotting/plot_violin.py b/examples/plotting/plot_violin.py index ce343c8078..3176514dc3 100644 --- a/examples/plotting/plot_violin.py +++ b/examples/plotting/plot_violin.py @@ -25,14 +25,16 @@ display = act.plotting.DistributionDisplay(ds) # Compare aircraft ground speed with ambient temperature -display.plot_violin('ambient_temp', - positions=[1.0], - ) - -display.plot_violin('total_temp', - positions=[2.0], - set_title='Aircraft Temperatures 2018-11-04', - ) +display.plot_violin( + 'ambient_temp', + positions=[1.0], +) + +display.plot_violin( + 'total_temp', + positions=[2.0], + set_title='Aircraft Temperatures 2018-11-04', +) # Update the tick information display.axes[0].set_xticks([0.5, 1, 2, 2.5]) diff --git a/examples/plotting/plot_wind_rose.py b/examples/plotting/plot_wind_rose.py index 3e4eda965a..140e950024 100644 --- a/examples/plotting/plot_wind_rose.py +++ b/examples/plotting/plot_wind_rose.py @@ -14,30 +14,32 @@ import act # Read in sonde files -twp_sonde_wildcard_list = ['twpsondewnpnC3.b1.20060119.050300.custom.cdf', - 'twpsondewnpnC3.b1.20060119.112000.custom.cdf', - 'twpsondewnpnC3.b1.20060119.163300.custom.cdf', - 'twpsondewnpnC3.b1.20060119.231600.custom.cdf', - 'twpsondewnpnC3.b1.20060120.043800.custom.cdf', - 'twpsondewnpnC3.b1.20060120.111900.custom.cdf', - 'twpsondewnpnC3.b1.20060120.170800.custom.cdf', - 'twpsondewnpnC3.b1.20060120.231500.custom.cdf', - 'twpsondewnpnC3.b1.20060121.051500.custom.cdf', - 'twpsondewnpnC3.b1.20060121.111600.custom.cdf', - 'twpsondewnpnC3.b1.20060121.171600.custom.cdf', - 'twpsondewnpnC3.b1.20060121.231600.custom.cdf', - 'twpsondewnpnC3.b1.20060122.052600.custom.cdf', - 'twpsondewnpnC3.b1.20060122.111500.custom.cdf', - 'twpsondewnpnC3.b1.20060122.171800.custom.cdf', - 'twpsondewnpnC3.b1.20060122.232600.custom.cdf', - 'twpsondewnpnC3.b1.20060123.052500.custom.cdf', - 'twpsondewnpnC3.b1.20060123.111700.custom.cdf', - 'twpsondewnpnC3.b1.20060123.171600.custom.cdf', - 'twpsondewnpnC3.b1.20060123.231500.custom.cdf', - 'twpsondewnpnC3.b1.20060124.051500.custom.cdf', - 'twpsondewnpnC3.b1.20060124.111800.custom.cdf', - 'twpsondewnpnC3.b1.20060124.171700.custom.cdf', - 'twpsondewnpnC3.b1.20060124.231500.custom.cdf'] +twp_sonde_wildcard_list = [ + 'twpsondewnpnC3.b1.20060119.050300.custom.cdf', + 'twpsondewnpnC3.b1.20060119.112000.custom.cdf', + 'twpsondewnpnC3.b1.20060119.163300.custom.cdf', + 'twpsondewnpnC3.b1.20060119.231600.custom.cdf', + 'twpsondewnpnC3.b1.20060120.043800.custom.cdf', + 'twpsondewnpnC3.b1.20060120.111900.custom.cdf', + 'twpsondewnpnC3.b1.20060120.170800.custom.cdf', + 'twpsondewnpnC3.b1.20060120.231500.custom.cdf', + 'twpsondewnpnC3.b1.20060121.051500.custom.cdf', + 'twpsondewnpnC3.b1.20060121.111600.custom.cdf', + 'twpsondewnpnC3.b1.20060121.171600.custom.cdf', + 'twpsondewnpnC3.b1.20060121.231600.custom.cdf', + 'twpsondewnpnC3.b1.20060122.052600.custom.cdf', + 
'twpsondewnpnC3.b1.20060122.111500.custom.cdf', + 'twpsondewnpnC3.b1.20060122.171800.custom.cdf', + 'twpsondewnpnC3.b1.20060122.232600.custom.cdf', + 'twpsondewnpnC3.b1.20060123.052500.custom.cdf', + 'twpsondewnpnC3.b1.20060123.111700.custom.cdf', + 'twpsondewnpnC3.b1.20060123.171600.custom.cdf', + 'twpsondewnpnC3.b1.20060123.231500.custom.cdf', + 'twpsondewnpnC3.b1.20060124.051500.custom.cdf', + 'twpsondewnpnC3.b1.20060124.111800.custom.cdf', + 'twpsondewnpnC3.b1.20060124.171700.custom.cdf', + 'twpsondewnpnC3.b1.20060124.231500.custom.cdf', +] sonde_filenames = [DATASETS.fetch(file) for file in twp_sonde_wildcard_list] sonde_ds = act.io.arm.read_arm_netcdf(sonde_filenames) diff --git a/examples/plotting/plot_xsection.py b/examples/plotting/plot_xsection.py index 126e3b9f84..6aec1f42a6 100644 --- a/examples/plotting/plot_xsection.py +++ b/examples/plotting/plot_xsection.py @@ -7,11 +7,9 @@ multi-dimensional dataset """ -from datetime import datetime from arm_test_data import DATASETS import matplotlib.pyplot as plt -import xarray as xr import act diff --git a/examples/qc/plot_qc_bsrn.py b/examples/qc/plot_qc_bsrn.py index 5cbae6e687..a06400909d 100644 --- a/examples/qc/plot_qc_bsrn.py +++ b/examples/qc/plot_qc_bsrn.py @@ -69,8 +69,13 @@ display = act.plotting.TimeSeriesDisplay(ds, figsize=(15, 10), subplot_shape=(2,)) # Plot radiation data in top plot. Add QC information to top plot. -display.plot(variable, subplot_index=(0,), day_night_background=True, assessment_overplot=True, - cb_friendly=True) +display.plot( + variable, + subplot_index=(0,), + day_night_background=True, + assessment_overplot=True, + cb_friendly=True, +) # Plot ancillary QC data in bottom plot display.qc_flag_block_plot(variable, subplot_index=(1,), cb_friendly=True) diff --git a/examples/retrievals/plot_cbh_sobel.py b/examples/retrievals/plot_cbh_sobel.py index 100c2e9f25..e5764a1067 100644 --- a/examples/retrievals/plot_cbh_sobel.py +++ b/examples/retrievals/plot_cbh_sobel.py @@ -19,8 +19,9 @@ filename_ceil = DATASETS.fetch('sgpceilC1.b1.20190101.000000.nc') ds = act.io.arm.read_arm_netcdf(filename_ceil) -ds = act.retrievals.cbh.generic_sobel_cbh(ds, variable='backscatter', height_dim='range', - var_thresh=1000.0, fill_na=0.) 
+ds = act.retrievals.cbh.generic_sobel_cbh( + ds, variable='backscatter', height_dim='range', var_thresh=1000.0, fill_na=0.0 +) # Plot the cloud base height data display = act.plotting.TimeSeriesDisplay(ds, subplot_shape=(1, 2), figsize=(16, 6)) diff --git a/examples/templates/example_template.py b/examples/templates/example_template.py index 6119c56583..2de33a0c90 100644 --- a/examples/templates/example_template.py +++ b/examples/templates/example_template.py @@ -11,7 +11,8 @@ # Download and read file or files with the IO and discovery functions # within ACT, example: results = act.discovery.download_arm_data( - username, token, 'sgpceilC1.b1', '2022-01-14', '2022-01-19') + username, token, 'sgpceilC1.b1', '2022-01-14', '2022-01-19' +) ceil_ds = act.io.arm.read_arm_netcdf(results) # Plot file using the ACT display submodule, example: diff --git a/examples/templates/notebook_and_blog_template.ipynb b/examples/templates/notebook_and_blog_template.ipynb index cefe371a3b..a130b83369 100644 --- a/examples/templates/notebook_and_blog_template.ipynb +++ b/examples/templates/notebook_and_blog_template.ipynb @@ -192,11 +192,7 @@ "start_date = \"2022-01-01T12:00:00\"\n", "end_date = \"2022-01-07T12:00:00\"\n", "\n", - "files = act.discovery.download_data(arm_username,\n", - " arm_password,\n", - " datastream,\n", - " start_date,\n", - " end_date)" + "files = act.discovery.download_data(arm_username, arm_password, datastream, start_date, end_date)" ] }, { @@ -254,8 +250,8 @@ "source": [ "# Create an ACT TimeSeriesDisplay.\n", "display = act.plotting.TimeSeriesDisplay(\n", - " {'Shear, Wind Direction, and Speed at ANL ATMOS': ds},\n", - " subplot_shape=(1,), figsize=(15, 5))\n", + " {'Shear, Wind Direction, and Speed at ANL ATMOS': ds}, subplot_shape=(1,), figsize=(15, 5)\n", + ")\n", "\n", "# Plot shear with a wind barb overlay, while using a color vision\n", "# deficiency (CVD) colormap.\n", diff --git a/examples/utils/plot_tar.py b/examples/utils/plot_tar.py index d09c301871..b4dedaff0d 100644 --- a/examples/utils/plot_tar.py +++ b/examples/utils/plot_tar.py @@ -11,7 +11,6 @@ """ -import os from pathlib import Path # Import standard libraries @@ -27,13 +26,15 @@ # TAR file into read_arm_netcdf() to be unpacked and read. # Here we get a list of MET data files to pack into a TAR bundle -met_wildcard_list = ['sgpmetE13.b1.20190101.000000.cdf', - 'sgpmetE13.b1.20190102.000000.cdf', - 'sgpmetE13.b1.20190103.000000.cdf', - 'sgpmetE13.b1.20190104.000000.cdf', - 'sgpmetE13.b1.20190105.000000.cdf', - 'sgpmetE13.b1.20190106.000000.cdf', - 'sgpmetE13.b1.20190107.000000.cdf'] +met_wildcard_list = [ + 'sgpmetE13.b1.20190101.000000.cdf', + 'sgpmetE13.b1.20190102.000000.cdf', + 'sgpmetE13.b1.20190103.000000.cdf', + 'sgpmetE13.b1.20190104.000000.cdf', + 'sgpmetE13.b1.20190105.000000.cdf', + 'sgpmetE13.b1.20190106.000000.cdf', + 'sgpmetE13.b1.20190107.000000.cdf', +] met_files = [Path(DATASETS.fetch(file)) for file in met_wildcard_list] # We can pass the list of netCDF data files to the pack_tar() function. 
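Note on the plot_tar.py example above: a minimal sketch of the TAR round trip it describes, assuming the act.utils.io_utils.pack_tar() helper that the example goes on to call and read_arm_netcdf()'s ability to unpack a TAR bundle; the 'met_bundle.tar' name, the write_filename keyword, and the single-file list are illustrative only.

from pathlib import Path
from arm_test_data import DATASETS
import act

# Fetch one of the MET files listed above (illustrative subset).
met_files = [Path(DATASETS.fetch('sgpmetE13.b1.20190101.000000.cdf'))]

# Bundle the netCDF file(s) into a TAR file; write_filename is an assumed keyword.
tar_file = act.utils.io_utils.pack_tar(met_files, write_filename='met_bundle.tar')

# read_arm_netcdf() unpacks the TAR bundle and reads the netCDF data inside.
ds = act.io.arm.read_arm_netcdf(tar_file)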
diff --git a/examples/workflows/plot_aerioe_with_cbh.py b/examples/workflows/plot_aerioe_with_cbh.py index 717d47933e..aed0041f01 100644 --- a/examples/workflows/plot_aerioe_with_cbh.py +++ b/examples/workflows/plot_aerioe_with_cbh.py @@ -22,9 +22,13 @@ if username is None or token is None or len(username) == 0 or len(token) == 0: pass else: - results = act.discovery.download_arm_data(username, token, 'sgpaerioe1turnC1.c1', '2022-02-11', '2022-02-11') + results = act.discovery.download_arm_data( + username, token, 'sgpaerioe1turnC1.c1', '2022-02-11', '2022-02-11' + ) aerioe_ds = act.io.arm.read_arm_netcdf(results) - results = act.discovery.download_arm_data(username, token, 'sgpceilC1.b1', '2022-02-11', '2022-02-11') + results = act.discovery.download_arm_data( + username, token, 'sgpceilC1.b1', '2022-02-11', '2022-02-11' + ) ceil_ds = act.io.arm.read_arm_netcdf(results) # There isn't information content from the AERI above 3 km @@ -39,20 +43,47 @@ # Create a TimeSeriesDisplay object display = act.plotting.TimeSeriesDisplay( - {'AERIoe': aerioe_ds, 'Ceilometer': ceil_ds}, - subplot_shape=(2,), figsize=(20, 10) + {'AERIoe': aerioe_ds, 'Ceilometer': ceil_ds}, subplot_shape=(2,), figsize=(20, 10) ) # Plot data - display.plot('first_cbh', dsname='Ceilometer', marker='+', color='black', markeredgewidth=3, - linewidth=0, subplot_index=(0,), label='cbh') - display.plot('temperature', dsname='AERIoe', cmap='viridis', set_shading='nearest', - add_nan=True, subplot_index=(0,)) - - display.plot('first_cbh', dsname='Ceilometer', marker='+', color='black', markeredgewidth=3, - linewidth=0, subplot_index=(1,), label='cbh') - display.plot('waterVapor', dsname='AERIoe', cmap='HomeyerRainbow', set_shading='nearest', - add_nan=True, subplot_index=(1,)) + display.plot( + 'first_cbh', + dsname='Ceilometer', + marker='+', + color='black', + markeredgewidth=3, + linewidth=0, + subplot_index=(0,), + label='cbh', + ) + display.plot( + 'temperature', + dsname='AERIoe', + cmap='viridis', + set_shading='nearest', + add_nan=True, + subplot_index=(0,), + ) + + display.plot( + 'first_cbh', + dsname='Ceilometer', + marker='+', + color='black', + markeredgewidth=3, + linewidth=0, + subplot_index=(1,), + label='cbh', + ) + display.plot( + 'waterVapor', + dsname='AERIoe', + cmap='HomeyerRainbow', + set_shading='nearest', + add_nan=True, + subplot_index=(1,), + ) # If you want to save it you can # plt.savefig('sgpaerioe1turnC1.c1.20220211.png') diff --git a/examples/workflows/plot_merged_product.py b/examples/workflows/plot_merged_product.py index bc9764bb83..0713ef40a2 100644 --- a/examples/workflows/plot_merged_product.py +++ b/examples/workflows/plot_merged_product.py @@ -36,7 +36,7 @@ # The ECOR and EBBR have different definitions of latent heat # flux and what is positive vs negative. Check out the ARM # Handbooks for more information -ds_ecor['lv_e'].values = ds_ecor['lv_e'].values * -1. 
+ds_ecor['lv_e'].values = ds_ecor['lv_e'].values * -1.0 # For example purposes, let's rename the ecor latent heat flux ds_ecor = ds_ecor.rename({'lv_e': 'latent_heat_flux_ecor'}) @@ -58,7 +58,9 @@ ds = xr.merge([ds_ecor, ds_ebbr, ds_sebs], compat='override') # Apply the QC information to set all flagged data to missing/NaN -ds.qcfilter.datafilter(del_qc_var=False, rm_assessments=['Bad', 'Incorrect', 'Indeterminate', 'Suspect']) +ds.qcfilter.datafilter( + del_qc_var=False, rm_assessments=['Bad', 'Incorrect', 'Indeterminate', 'Suspect'] +) # Plot up data from the merged dataset for each of the instruments display = act.plotting.TimeSeriesDisplay(ds, figsize=(15, 10), subplot_shape=(3,)) diff --git a/examples/workflows/plot_multiple_dataset.py b/examples/workflows/plot_multiple_dataset.py index fba3c58f05..40264b00fd 100644 --- a/examples/workflows/plot_multiple_dataset.py +++ b/examples/workflows/plot_multiple_dataset.py @@ -27,9 +27,13 @@ met_ds = act.io.arm.read_arm_netcdf(filename_met) else: # Download and read data - results = act.discovery.download_arm_data(username, token, 'sgpceilC1.b1', '2022-01-01', '2022-01-07') + results = act.discovery.download_arm_data( + username, token, 'sgpceilC1.b1', '2022-01-01', '2022-01-07' + ) ceil_ds = act.io.arm.read_arm_netcdf(results) - results = act.discovery.download_arm_data(username, token, 'sgpmetE13.b1', '2022-01-01', '2022-01-07') + results = act.discovery.download_arm_data( + username, token, 'sgpmetE13.b1', '2022-01-01', '2022-01-07' + ) met_ds = act.io.arm.read_arm_netcdf(results) # Read in CEIL data and correct it diff --git a/examples/workflows/plot_qc_transforms.py b/examples/workflows/plot_qc_transforms.py index b3ba93b01c..501318e0c0 100644 --- a/examples/workflows/plot_qc_transforms.py +++ b/examples/workflows/plot_qc_transforms.py @@ -10,7 +10,6 @@ from arm_test_data import DATASETS import matplotlib.pyplot as plt -import xarray as xr import act @@ -42,10 +41,17 @@ print('After: (2 5 - minute averages)', ds_5minb[variable].values[0:2]) ## Plot up the variable and qc block plot -display = act.plotting.TimeSeriesDisplay({'Original': ds, 'Average': ds_5min, 'Average_QCd': ds_5minb}, - figsize=(15, 10), subplot_shape=(2,)) +display = act.plotting.TimeSeriesDisplay( + {'Original': ds, 'Average': ds_5min, 'Average_QCd': ds_5minb}, + figsize=(15, 10), + subplot_shape=(2,), +) display.plot(variable, dsname='Original', subplot_index=(0,), day_night_background=True) -display.plot(variable, dsname='Average', subplot_index=(1,), day_night_background=True, label='No QC') -display.plot(variable, dsname='Average_QCd', subplot_index=(1,), day_night_background=True, label='QC') +display.plot( + variable, dsname='Average', subplot_index=(1,), day_night_background=True, label='No QC' +) +display.plot( + variable, dsname='Average_QCd', subplot_index=(1,), day_night_background=True, label='QC' +) plt.legend() plt.show() diff --git a/examples/workflows/plot_weighted_average.py b/examples/workflows/plot_weighted_average.py index b0645456fc..0199def978 100644 --- a/examples/workflows/plot_weighted_average.py +++ b/examples/workflows/plot_weighted_average.py @@ -35,13 +35,15 @@ # } # Get a list of filenames to use -met_wildcard_list = ['sgpmetE13.b1.20190101.000000.cdf', - 'sgpmetE13.b1.20190102.000000.cdf', - 'sgpmetE13.b1.20190103.000000.cdf', - 'sgpmetE13.b1.20190104.000000.cdf', - 'sgpmetE13.b1.20190105.000000.cdf', - 'sgpmetE13.b1.20190106.000000.cdf', - 'sgpmetE13.b1.20190107.000000.cdf'] +met_wildcard_list = [ + 'sgpmetE13.b1.20190101.000000.cdf', + 
'sgpmetE13.b1.20190102.000000.cdf', + 'sgpmetE13.b1.20190103.000000.cdf', + 'sgpmetE13.b1.20190104.000000.cdf', + 'sgpmetE13.b1.20190105.000000.cdf', + 'sgpmetE13.b1.20190106.000000.cdf', + 'sgpmetE13.b1.20190107.000000.cdf', +] ds = {} new = {} diff --git a/guides/GUIDE_V2.rst b/guides/GUIDE_V2.rst index f276012691..fb26e247fb 100644 --- a/guides/GUIDE_V2.rst +++ b/guides/GUIDE_V2.rst @@ -51,7 +51,7 @@ Similar to the discovery module, functionality has not changed but the naming co Plotting ======== -A major change to how secondary y-axes are handled was implemented in the TimeSeriesDisplay and DistributionDisplay modules. Currently, those plotting routines return a 1-D array of display axes. This has always made the secondary y-axis more difficult to configure and use. In the new version, it will return a 2-D array of display axes [[left axes, right axes]] to make it simpler to utilize. +A major change to how secondary y-axes are handled was implemented in the TimeSeriesDisplay and DistributionDisplay modules. Currently, those plotting routines return a 1-D array of display axes. This has always made the secondary y-axis more difficult to configure and use. In the new version, it will return a 2-D array of display axes [[left axes, right axes]] to make it simpler to utilize. HistogramDisplay is being renamed to DistributionDisplay to be more inclusive of the variety of visualization types that are housed there. Additionally there are changes to two of the plot names to be more consistent with the others. diff --git a/guides/act_cheatsheet.tex b/guides/act_cheatsheet.tex index 819bc7a6bd..891a8e6a78 100644 --- a/guides/act_cheatsheet.tex +++ b/guides/act_cheatsheet.tex @@ -396,8 +396,8 @@ \begin{poster} { -headerborder=closed, colspacing=0.8em, bgColorOne=white, bgColorTwo=white, borderColor=lightblue, headerColorOne=black, headerColorTwo=lightblue, -headerFontColor=white, boxColorOne=white, textborder=roundedleft, eyecatcher=true, headerheight=0.06\textheight, headershape=roundedright, headerfont=\Large\bf\textsc, linewidth=2pt +headerborder=closed, colspacing=0.8em, bgColorOne=white, bgColorTwo=white, borderColor=lightblue, headerColorOne=black, headerColorTwo=lightblue, +headerFontColor=white, boxColorOne=white, textborder=roundedleft, eyecatcher=true, headerheight=0.06\textheight, headershape=roundedright, headerfont=\Large\bf\textsc, linewidth=2pt } %---------------------------------------------------------------- % Title @@ -436,7 +436,7 @@ $>$$>$$>$ display.put\_display\_in\_subplot(\\ \-\hspace{1.2cm} display, subplot\_index))\\ \-\hspace{0.2cm} $\bullet$ This will place a Display object into a specific\\ -\-\hspace{0.5cm} subplot. +\-\hspace{0.5cm} subplot. 
\end{tabular} \begin{tabular}{@{}ll@{}} diff --git a/requirements.txt b/requirements.txt index 9098993baf..6b8e108fbd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,4 +18,4 @@ fsspec metpy lxml cmweather -aiohttp>=3.9.0b1 \ No newline at end of file +aiohttp>=3.9.0b1 diff --git a/scripts/ads.py b/scripts/ads.py index 5a61ff51fb..d4cd336d50 100644 --- a/scripts/ads.py +++ b/scripts/ads.py @@ -10,7 +10,6 @@ """ import argparse -import re import json import glob import ast @@ -21,6 +20,7 @@ try: import cartopy.crs as ccrs + CARTOPY_AVAILABLE = True except ImportError: CARTOPY_AVAILABLE = False @@ -51,8 +51,10 @@ def option_error_check(args, error_fields, check_all=False): if not value.startswith(prepend): error_fields[ii] = prepend + value - print(f"\n{pathlib.Path(__file__).name}: error: {how_many} of the arguments " - f"{' '.join(error_fields)} is requried\n") + print( + f"\n{pathlib.Path(__file__).name}: error: {how_many} of the arguments " + f"{' '.join(error_fields)} is required\n" + ) exit() @@ -118,7 +120,8 @@ def find_drop_vars(args): keep_vars_additional = [] for var_name in keep_vars: qc_var_name = ds.qcfilter.check_for_ancillary_qc( - var_name, add_if_missing=False, cleanup=False) + var_name, add_if_missing=False, cleanup=False + ) if qc_var_name is not None: keep_vars_additional.append(qc_var_name) @@ -142,16 +145,25 @@ def geodisplay(args): except KeyError: pass - display = act.plotting.GeographicPlotDisplay({dsname: ds}, - figsize=args.figsize) - - display.geoplot(data_field=args.field, lat_field=args.latitude, - lon_field=args.longitude, dsname=dsname, - cbar_label=args.cb_label, title=args.set_title, - plot_buffer=args.plot_buffer, stamen=args.stamen, - tile=args.tile, cartopy_feature=args.cfeatures, - cmap=args.cmap, text=args.text, gridlines=args.gridlines, - projection=args.projection, **args.kwargs) + display = act.plotting.GeographicPlotDisplay({dsname: ds}, figsize=args.figsize) + + display.geoplot( + data_field=args.field, + lat_field=args.latitude, + lon_field=args.longitude, + dsname=dsname, + cbar_label=args.cb_label, + title=args.set_title, + plot_buffer=args.plot_buffer, + stamen=args.stamen, + tile=args.tile, + cartopy_feature=args.cfeatures, + cmap=args.cmap, + text=args.text, + gridlines=args.gridlines, + projection=args.projection, + **args.kwargs, + ) plt.savefig(args.out_path) plt.show() @@ -175,26 +187,33 @@ def skewt(args): display = act.plotting.SkewTDisplay({dsname: ds}, figsize=args.figsize) if args.from_u_and_v: - display.plot_from_u_and_v(u_field=args.u_wind, v_field=args.v_wind, - p_field=args.p_field, t_field=args.t_field, - td_field=args.td_field, - subplot_index=subplot_index, - dsname=dsname, show_parcel=args.show_parcel, - p_levels_to_plot=args.plevels_plot, - shade_cape=args.shade_cape, - shade_cin=args.shade_cin, - set_title=args.set_title, - plot_barbs_kwargs=args.plot_barbs_kwargs, - plot_kwargs=args.plot_kwargs) + display.plot_from_u_and_v( + u_field=args.u_wind, + v_field=args.v_wind, + p_field=args.p_field, + t_field=args.t_field, + td_field=args.td_field, + subplot_index=subplot_index, + dsname=dsname, + show_parcel=args.show_parcel, + p_levels_to_plot=args.plevels_plot, + shade_cape=args.shade_cape, + shade_cin=args.shade_cin, + set_title=args.set_title, + plot_barbs_kwargs=args.plot_barbs_kwargs, + plot_kwargs=args.plot_kwargs, + ) if args.from_spd_and_dir: - display.plot_from_spd_and_dir(spd_field=args.spd_field, - dir_field=args.dir_field, - p_field=args.p_field, - t_field=args.t_field, - td_field=args.td_field, -
dsname=dsname, - **args.kwargs) + display.plot_from_spd_and_dir( + spd_field=args.spd_field, + dir_field=args.dir_field, + p_field=args.p_field, + t_field=args.t_field, + td_field=args.td_field, + dsname=dsname, + **args.kwargs, + ) plt.savefig(args.out_path) plt.show() @@ -218,18 +237,26 @@ def xsection(args): display = act.plotting.XSectionDisplay({dsname: ds}, figsize=args.figsize) if args.plot_xsection: - display.plot_xsection(dsname=dsname, varname=args.field, - x=args.x_field, y=args.y_field, - subplot_index=subplot_index, - sel_kwargs=args.sel_kwargs, - isel_kwargs=args.isel_kwargs, **args.kwargs) + display.plot_xsection( + dsname=dsname, + varname=args.field, + x=args.x_field, + y=args.y_field, + subplot_index=subplot_index, + sel_kwargs=args.sel_kwargs, + isel_kwargs=args.isel_kwargs, + **args.kwargs, + ) if args.xsection_map: - display.plot_xsection_map(dsname=dsname, varname=args.field, - subplot_index=subplot_index, - coastlines=args.coastlines, - background=args.background, - **args.kwargs) + display.plot_xsection_map( + dsname=dsname, + varname=args.field, + subplot_index=subplot_index, + coastlines=args.coastlines, + background=args.background, + **args.kwargs, + ) plt.savefig(args.out_path) plt.show() @@ -239,7 +266,6 @@ def xsection(args): def wind_rose(args): - drop_vars = find_drop_vars(args) ds = act.io.arm.read_arm_netcdf(args.file_path, drop_variables=drop_vars) @@ -253,15 +279,20 @@ def wind_rose(args): except KeyError: pass - display = act.plotting.WindRoseDisplay({dsname: ds}, - figsize=args.figsize) - - display.plot(dir_field=args.dir_field, spd_field=args.spd_field, - subplot_index=subplot_index, - dsname=dsname, cmap=args.cmap, - set_title=args.set_title, - num_dirs=args.num_dir, spd_bins=args.spd_bins, - tick_interval=args.tick_interval, **args.kwargs) + display = act.plotting.WindRoseDisplay({dsname: ds}, figsize=args.figsize) + + display.plot( + dir_field=args.dir_field, + spd_field=args.spd_field, + subplot_index=subplot_index, + dsname=dsname, + cmap=args.cmap, + set_title=args.set_title, + num_dirs=args.num_dir, + spd_bins=args.spd_bins, + tick_interval=args.tick_interval, + **args.kwargs, + ) plt.savefig(args.out_path) plt.show() plt.close(display.fig) @@ -270,7 +301,6 @@ def wind_rose(args): def timeseries(args): - drop_vars = find_drop_vars(args) ds = act.io.arm.read_arm_netcdf(args.file_path, drop_variables=drop_vars) @@ -289,11 +319,19 @@ def timeseries(args): pass display = act.plotting.TimeSeriesDisplay( - {dsname: ds}, figsize=args.figsize, - subplot_shape=subplot_shape) - - options = ['plot', 'barbs_spd_dir', 'barbs_u_v', 'xsection_from_1d', - 'time_height_scatter', 'qc', 'fill_between', 'multi_panel'] + {dsname: ds}, figsize=args.figsize, subplot_shape=subplot_shape + ) + + options = [ + 'plot', + 'barbs_spd_dir', + 'barbs_u_v', + 'xsection_from_1d', + 'time_height_scatter', + 'qc', + 'fill_between', + 'multi_panel', + ] option_error_check(args, options) if args.plot: @@ -303,20 +341,27 @@ def timeseries(args): else: yrange = args.set_yrange display.plot( - field=args.field, dsname=dsname, cmap=args.cmap, - set_title=args.set_title, add_nan=args.add_nan, + field=args.field, + dsname=dsname, + cmap=args.cmap, + set_title=args.set_title, + add_nan=args.add_nan, subplot_index=subplot_index, use_var_for_y=args.var_y, day_night_background=args.day_night, invert_y_axis=args.invert_y_axis, - abs_limits=args.abs_limits, time_rng=args.time_rng, + abs_limits=args.abs_limits, + time_rng=args.time_rng, assessment_overplot=args.assessment_overplot, 
assessment_overplot_category=args.overplot_category, assessment_overplot_category_color=args.category_color, - force_line_plot=args.force_line_plot, labels=args.labels, - cbar_label=args.cb_label, secondary_y=args.secondary_y, + force_line_plot=args.force_line_plot, + labels=args.labels, + cbar_label=args.cb_label, + secondary_y=args.secondary_y, y_rng=yrange, - **args.kwargs) + **args.kwargs, + ) if args.barbs_spd_dir: display.plot_barbs_from_spd_dir( @@ -324,12 +369,15 @@ def timeseries(args): spd_field=args.spd_field, pres_field=args.p_field, dsname=dsname, - **args.kwargs) + **args.kwargs, + ) if args.barbs_u_v: display.plot_barbs_from_u_v( - u_field=args.u_wind, v_field=args.v_wind, - pres_field=args.p_field, dsname=dsname, + u_field=args.u_wind, + v_field=args.v_wind, + pres_field=args.p_field, + dsname=dsname, set_title=args.set_title, invert_y_axis=args.invert_y_axis, day_night_background=args.day_night, @@ -337,49 +385,61 @@ def timeseries(args): num_barbs_y=args.num_barb_y, use_var_for_y=args.var_y, subplot_index=subplot_index, - **args.kwargs) + **args.kwargs, + ) if args.xsection_from_1d: option_error_check(args, 'field') display.plot_time_height_xsection_from_1d_data( - data_field=args.field, pres_field=args.p_field, - dsname=dsname, set_title=args.set_title, + data_field=args.field, + pres_field=args.p_field, + dsname=dsname, + set_title=args.set_title, day_night_background=args.day_night, num_time_periods=args.num_time_periods, num_y_levels=args.num_y_levels, invert_y_axis=args.invert_y_axis, subplot_index=subplot_index, cbar_label=args.cb_label, - **args.kwargs) + **args.kwargs, + ) if args.time_height_scatter: option_error_check(args, 'field') display.time_height_scatter( - data_field=args.field, dsname=dsname, - cmap=args.cmap, alt_label=args.alt_label, - alt_field=args.alt_field, cb_label=args.cb_label, - **args.kwargs) + data_field=args.field, + dsname=dsname, + cmap=args.cmap, + alt_label=args.alt_label, + alt_field=args.alt_field, + cb_label=args.cb_label, + **args.kwargs, + ) if args.qc: option_error_check(args, 'field') display.qc_flag_block_plot( - data_field=args.field, dsname=dsname, + data_field=args.field, + dsname=dsname, subplot_index=subplot_index, time_rng=args.time_rng, assessment_color=args.assessment_color, - **args.kwargs) + **args.kwargs, + ) if args.fill_between: option_error_check(args, 'field') display.fill_between( - field=args.field, dsname=dsname, + field=args.field, + dsname=dsname, subplot_index=subplot_index, set_title=args.set_title, secondary_y=args.secondary_y, - **args.kwargs) + **args.kwargs, + ) if args.multi_panel: option_error_check(args, ['fields', 'plot_type'], check_all=True) @@ -387,27 +447,36 @@ def timeseries(args): for i, j, k in zip(args.fields, subplot_index, args.plot_type): if k == 'plot': display.plot( - field=i, dsname=dsname, cmap=args.cmap, - set_title=args.set_title, add_nan=args.add_nan, + field=i, + dsname=dsname, + cmap=args.cmap, + set_title=args.set_title, + add_nan=args.add_nan, subplot_index=j, use_var_for_y=args.var_y, day_night_background=args.day_night, invert_y_axis=args.invert_y_axis, - abs_limits=args.abs_limits, time_rng=args.time_rng, + abs_limits=args.abs_limits, + time_rng=args.time_rng, assessment_overplot=args.assessment_overplot, assessment_overplot_category=args.overplot_category, assessment_overplot_category_color=args.category_color, - force_line_plot=args.force_line_plot, labels=args.labels, - cbar_label=args.cb_label, secondary_y=args.secondary_y, - **args.kwargs) + 
force_line_plot=args.force_line_plot, + labels=args.labels, + cbar_label=args.cb_label, + secondary_y=args.secondary_y, + **args.kwargs, + ) if k == 'qc': display.qc_flag_block_plot( - data_field=i, dsname=dsname, + data_field=i, + dsname=dsname, subplot_index=j, time_rng=args.time_rng, assessment_color=args.assessment_color, - **args.kwargs) + **args.kwargs, + ) plt.savefig(args.out_path) plt.show() @@ -417,7 +486,6 @@ def timeseries(args): def histogram(args): - drop_vars = find_drop_vars(args) ds = act.io.arm.read_arm_netcdf(args.file_path, drop_variables=drop_vars) @@ -433,44 +501,58 @@ def histogram(args): pass display = act.plotting.DistributionDisplay( - {dsname: ds}, figsize=args.figsize, - subplot_shape=subplot_shape) + {dsname: ds}, figsize=args.figsize, subplot_shape=subplot_shape + ) if args.stacked_bar_graph: display.plot_stacked_bar_graph( - field=args.field, dsname=dsname, - bins=args.bins, density=args.density, + field=args.field, + dsname=dsname, + bins=args.bins, + density=args.density, sortby_field=args.sortby_field, sortby_bins=args.sortby_bins, set_title=args.set_title, subplot_index=subplot_index, - **args.kwargs) + **args.kwargs, + ) if args.size_dist: display.plot_size_distribution( - field=args.field, bins=args.bin_field, - time=args.time, dsname=dsname, + field=args.field, + bins=args.bin_field, + time=args.time, + dsname=dsname, set_title=args.set_title, subplot_index=subplot_index, - **args.kwargs) + **args.kwargs, + ) if args.stairstep: display.plot_stairstep_graph( - field=args.field, dsname=dsname, - bins=args.bins, density=args.density, + field=args.field, + dsname=dsname, + bins=args.bins, + density=args.density, sortby_field=args.sortby_field, sortby_bins=args.sortby_bins, set_title=args.set_title, subplot_index=subplot_index, - **args.kwargs) + **args.kwargs, + ) if args.heatmap: display.plot_heatmap( - x_field=args.x_field, y_field=args.y_field, - dsname=dsname, x_bins=args.x_bins, - y_bins=args.y_bins, set_title=args.set_title, + x_field=args.x_field, + y_field=args.y_field, + dsname=dsname, + x_bins=args.x_bins, + y_bins=args.y_bins, + set_title=args.set_title, density=args.density, - subplot_index=subplot_index, **args.kwargs) + subplot_index=subplot_index, + **args.kwargs, + ) plt.savefig(args.out_path) plt.show() @@ -498,40 +580,51 @@ def contour(args): display = act.plotting.ContourDisplay(data, figsize=args.figsize) if args.create_contour: - display.create_contour(fields=fields, time=time, function=args.function, - grid_delta=args.grid_delta, - grid_buffer=args.grid_buffer, - subplot_index=args.subplot_index, - **args.kwargs) + display.create_contour( + fields=fields, + time=time, + function=args.function, + grid_delta=args.grid_delta, + grid_buffer=args.grid_buffer, + subplot_index=args.subplot_index, + **args.kwargs, + ) if args.contourf: - display.contourf(x=args.x, y=args.y, z=args.z, - subplot_index=args.subplot_index, - **args.kwargs) + display.contourf( + x=args.x, y=args.y, z=args.z, subplot_index=args.subplot_index, **args.kwargs + ) if args.plot_contour: - display.contour(x=args.x, y=args.y, z=args.z, - subplot_index=args.subplot_index, - **args.kwargs) + display.contour( + x=args.x, y=args.y, z=args.z, subplot_index=args.subplot_index, **args.kwargs + ) if args.vectors_spd_dir: - display.plot_vectors_from_spd_dir(fields=wind_fields, time=time, - mesh=args.mesh, function=args.function, - grid_delta=args.grid_delta, - grid_buffer=args.grid_buffer, - subplot_index=args.subplot_index, - **args.kwargs) + display.plot_vectors_from_spd_dir( + 
fields=wind_fields, + time=time, + mesh=args.mesh, + function=args.function, + grid_delta=args.grid_delta, + grid_buffer=args.grid_buffer, + subplot_index=args.subplot_index, + **args.kwargs, + ) if args.barbs: - display.barbs(x=args.x, y=args.y, u=args.u, v=args.v, - subplot_index=args.subplot_index, - **args.kwargs) + display.barbs( + x=args.x, y=args.y, u=args.u, v=args.v, subplot_index=args.subplot_index, **args.kwargs + ) if args.plot_station: - display.plot_station(fields=station_fields, time=time, - text_color=args.text_color, - subplot_index=args.subplot_index, - **args.kwargs) + display.plot_station( + fields=station_fields, + time=time, + text_color=args.text_color, + subplot_index=args.subplot_index, + **args.kwargs, + ) plt.savefig(args.out_path) plt.show() @@ -555,8 +648,11 @@ def convert_arg_line_to_args(line): def main(): prefix_char = '@' parser = argparse.ArgumentParser( - description=(f'Create plot from a data file. Can use command line opitons ' - f'or point to a configuration file using {prefix_char} character.')) + description=( + f'Create plot from a data file. Can use command line options ' + f'or point to a configuration file using {prefix_char} character.' + ) + ) # Allow user to reference a file by using the @ symbol for a specific # argument value @@ -565,336 +661,817 @@ def main(): # Update the file parsing logic to skip commented lines parser.convert_arg_line_to_args = convert_arg_line_to_args - parser.add_argument('-f', '--file_path', type=str, required=True, - help=('Required: Full path to file for creating Plot. For multiple ' - 'files use terminal syntax for matching muliple files. ' - 'For example "sgpmetE13.b1.202007*.*.nc" will match all files ' - 'for the month of July in 2020. Need to use double quotes ' - 'to stop terminal from expanding the search, and let the ' - 'python program perform search.')) + parser.add_argument( + '-f', + '--file_path', + type=str, + required=True, + help=( + 'Required: Full path to file for creating Plot. For multiple ' + 'files use terminal syntax for matching multiple files. ' + 'For example "sgpmetE13.b1.202007*.*.nc" will match all files ' + 'for the month of July in 2020. Need to use double quotes ' + 'to stop terminal from expanding the search, and let the ' + 'python program perform search.' + ), + ) out_path_default = 'image.png' - parser.add_argument('-o', '--out_path', type=str, default=out_path_default, - help=("Full path filename to use for saving image. " - "Default is '{out_path_default}'. If only a path is given " - "will use that path with image name '{out_path_default}', " - "else will use filename given.")) + parser.add_argument( + '-o', + '--out_path', + type=str, + default=out_path_default, + help=( + "Full path filename to use for saving image. " + f"Default is '{out_path_default}'. If only a path is given " + f"will use that path with image name '{out_path_default}', " + "else will use filename given."
+ ), + ) + parser.add_argument('-fd', '--field', type=str, default=None, help='Name of the field to plot') + parser.add_argument( + '-fds', + '--fields', + nargs='+', + type=str, + default=None, + help='Name of the fields to use to plot', + ) + parser.add_argument( + '-wfs', + '--wind_fields', + nargs='+', + type=str, + default=None, + help='Wind field names used to plot', + ) + parser.add_argument( + '-sfs', + '--station_fields', + nargs='+', + type=str, + default=None, + help='Station field names to plot sites', + ) default = 'lat' - parser.add_argument('-lat', '--latitude', type=str, default=default, - help=f"Name of latitude variable in file. Default is '{default}'") + parser.add_argument( + '-lat', + '--latitude', + type=str, + default=default, + help=f"Name of latitude variable in file. Default is '{default}'", + ) default = 'lon' - parser.add_argument('-lon', '--longitude', type=str, default=default, - help=f"Name of longitude variable in file. Default is '{default}'") - parser.add_argument('-xf', '--x_field', type=str, default=None, - help='Name of variable to plot on x axis') - parser.add_argument('-yf', '--y_field', type=str, default=None, - help='Name of variable to plot on y axis') - parser.add_argument('-x', type=np.array, - help='x coordinates or grid for z') - parser.add_argument('-y', type=np.array, - help='y coordinates or grid for z') - parser.add_argument('-z', type=np.array, - help='Values over which to contour') + parser.add_argument( + '-lon', + '--longitude', + type=str, + default=default, + help=f"Name of longitude variable in file. Default is '{default}'", + ) + parser.add_argument( + '-xf', '--x_field', type=str, default=None, help='Name of variable to plot on x axis' + ) + parser.add_argument( + '-yf', '--y_field', type=str, default=None, help='Name of variable to plot on y axis' + ) + parser.add_argument('-x', type=np.array, help='x coordinates or grid for z') + parser.add_argument('-y', type=np.array, help='y coordinates or grid for z') + parser.add_argument('-z', type=np.array, help='Values over which to contour') default = 'u_wind' - parser.add_argument('-u', '--u_wind', type=str, default=default, - help=f"File variable name for u_wind wind component. Default is '{default}'") + parser.add_argument( + '-u', + '--u_wind', + type=str, + default=default, + help=f"File variable name for u_wind wind component. Default is '{default}'", + ) default = 'v_wind' - parser.add_argument('-v', '--v_wind', type=str, default=default, - help=f"File variable name for v_wind wind compenent. Default is '{default}'") + parser.add_argument( + '-v', + '--v_wind', + type=str, + default=default, + help=f"File variable name for v_wind wind component. Default is '{default}'", + ) default = 'pres' - parser.add_argument('-pf', '--p_field', type=str, default=default, - help=f"File variable name for pressure. Default is '{default}'") + parser.add_argument( + '-pf', + '--p_field', + type=str, + default=default, + help=f"File variable name for pressure. Default is '{default}'", + ) default = 'tdry' - parser.add_argument('-tf', '--t_field', type=str, default=default, - help=f"File variable name for temperature. Default is '{default}'") + parser.add_argument( + '-tf', + '--t_field', + type=str, + default=default, + help=f"File variable name for temperature. Default is '{default}'", + ) default = 'dp' - parser.add_argument('-tdf', '--td_field', type=str, default=default, - help=f"File variable name for dewpoint temperature. 
Default is '{default}'") + parser.add_argument( + '-tdf', + '--td_field', + type=str, + default=default, + help=f"File variable name for dewpoint temperature. Default is '{default}'", + ) default = 'wspd' - parser.add_argument('-sf', '--spd_field', type=str, default=default, - help=f"File variable name for wind speed. Default is '{default}'") + parser.add_argument( + '-sf', + '--spd_field', + type=str, + default=default, + help=f"File variable name for wind speed. Default is '{default}'", + ) default = 'deg' - parser.add_argument('-df', '--dir_field', type=str, default=default, - help=f"File variable name for wind direction. Default is '{default}'") - parser.add_argument('-al', '--alt_label', type=str, default=None, - help='Altitude axis label') + parser.add_argument( + '-df', + '--dir_field', + type=str, + default=default, + help=f"File variable name for wind direction. Default is '{default}'", + ) + parser.add_argument('-al', '--alt_label', type=str, default=None, help='Altitude axis label') default = 'alt' - parser.add_argument('-af', '--alt_field', type=str, default=default, - help=f"File variable name for altitude. Default is '{default}'") + parser.add_argument( + '-af', + '--alt_field', + type=str, + default=default, + help=f"File variable name for altitude. Default is '{default}'", + ) global _default_dsname _default_dsname = 'act_datastream' - parser.add_argument('-ds', '--dsname', type=str, default=_default_dsname, - help=f"Name of datastream to plot. Default is '{_default_dsname}'") + parser.add_argument( + '-ds', + '--dsname', + type=str, + default=_default_dsname, + help=f"Name of datastream to plot. Default is '{_default_dsname}'", + ) default = '(0, )' - parser.add_argument('-si', '--subplot_index', type=ast.literal_eval, - default=default, - help=f'Index of the subplot via tuple syntax. ' - f'Example for two plots is "(0,), (1,)". ' - f"Default is '{default}'") - default = (1, ) - parser.add_argument('-ss', '--subplot_shape', nargs='+', type=int, - default=default, - help=(f'The number of (rows, columns) ' - f'for the subplots in the display. ' - f'Default is {default}')) + parser.add_argument( + '-si', + '--subplot_index', + type=ast.literal_eval, + default=default, + help=f'Index of the subplot via tuple syntax. ' + f'Example for two plots is "(0,), (1,)". ' + f"Default is '{default}'", + ) + default = (1,) + parser.add_argument( + '-ss', + '--subplot_shape', + nargs='+', + type=int, + default=default, + help=( + f'The number of (rows, columns) ' + f'for the subplots in the display. ' + f'Default is {default}' + ), + ) plot_type_options = ['plot', 'qc'] - parser.add_argument('-pt', '--plot_type', nargs='+', type=str, - help=f'Type of plot to make. Current options include: ' - f'{plot_type_options}') - parser.add_argument('-vy', '--var_y', type=str, default=None, - help=('Set this to the name of a data variable in ' - 'the Dataset to use as the y-axis variable ' - 'instead of the default dimension.')) - parser.add_argument('-plp', '--plevels_plot', - type=np.array, default=None, - help='Pressure levels to plot the wind barbs on.') - parser.add_argument('-cbl', '--cb_label', type=str, default=None, - help='Colorbar label to use') - parser.add_argument('-st', '--set_title', type=str, default=None, - help='Title for the plot') + parser.add_argument( + '-pt', + '--plot_type', + nargs='+', + type=str, + help=f'Type of plot to make. 
Current options include: ' f'{plot_type_options}', + ) + parser.add_argument( + '-vy', + '--var_y', + type=str, + default=None, + help=( + 'Set this to the name of a data variable in ' + 'the Dataset to use as the y-axis variable ' + 'instead of the default dimension.' + ), + ) + parser.add_argument( + '-plp', + '--plevels_plot', + type=np.array, + default=None, + help='Pressure levels to plot the wind barbs on.', + ) + parser.add_argument('-cbl', '--cb_label', type=str, default=None, help='Colorbar label to use') + parser.add_argument('-st', '--set_title', type=str, default=None, help='Title for the plot') default = 0.08 - parser.add_argument('-pb', '--plot_buffer', type=float, default=default, - help=(f'Buffer to add around data on plot in lat ' - f'and lon dimension. Default is {default}')) + parser.add_argument( + '-pb', + '--plot_buffer', + type=float, + default=default, + help=( + f'Buffer to add around data on plot in lat ' f'and lon dimension. Default is {default}' + ), + ) default = 'terrain-background' - parser.add_argument('-sm', '--stamen', type=str, default=default, - help=f"Dataset to use for background image. Default is '{default}'") + parser.add_argument( + '-sm', + '--stamen', + type=str, + default=default, + help=f"Dataset to use for background image. Default is '{default}'", + ) default = 8 - parser.add_argument('-tl', '--tile', type=int, default=default, - help=f'Tile zoom to use with background image. Default is {default}') - parser.add_argument('-cfs', '--cfeatures', nargs='+', type=str, default=None, - help='Cartopy feature to add to plot') - parser.add_argument('-txt', '--text', type=json.loads, default=None, - help=('Dictionary of {text:[lon,lat]} to add to plot. ' - 'Can have more than one set of text to add.')) + parser.add_argument( + '-tl', + '--tile', + type=int, + default=default, + help=f'Tile zoom to use with background image. Default is {default}', + ) + parser.add_argument( + '-cfs', + '--cfeatures', + nargs='+', + type=str, + default=None, + help='Cartopy feature to add to plot', + ) + parser.add_argument( + '-txt', + '--text', + type=json.loads, + default=None, + help=( + 'Dictionary of {text:[lon,lat]} to add to plot. ' + 'Can have more than one set of text to add.' + ), + ) default = 'rainbow' - parser.add_argument('-cm', '--cmap', default=default, - help=f"colormap to use. Defaut is '{default}'") - parser.add_argument('-abl', '--abs_limits', nargs='+', type=float, - default=(None, None), - help=('Sets the bounds on plot limits even if data ' - 'values exceed those limits. Y axis limits. Default is no limits.')) - parser.add_argument('-tr', '--time_rng', nargs='+', type=float, default=None, - help=('List or tuple with (min,max) values to set the ' - 'x-axis range limits')) + parser.add_argument( + '-cm', '--cmap', default=default, help=f"colormap to use. Default is '{default}'" + ) + parser.add_argument( + '-abl', + '--abs_limits', + nargs='+', + type=float, + default=(None, None), + help=( + 'Sets the bounds on plot limits even if data ' + 'values exceed those limits. Y axis limits. Default is no limits.' + ), + ) + parser.add_argument( + '-tr', + '--time_rng', + nargs='+', + type=float, + default=None, + help=('List or tuple with (min,max) values to set the ' 'x-axis range limits'), + ) default = 20 - parser.add_argument('-nd', '--num_dir', type=int, default=default, - help=(f'Number of directions to splot the wind rose into. 
' - f'Default is {default}')) - parser.add_argument('-sb', '--spd_bins', nargs='+', type=float, default=None, - help='Bin boundaries to sort the wind speeds into') + parser.add_argument( + '-nd', + '--num_dir', + type=int, + default=default, + help=(f'Number of directions to split the wind rose into. ' f'Default is {default}'), + ) + parser.add_argument( + '-sb', + '--spd_bins', + nargs='+', + type=float, + default=None, + help='Bin boundaries to sort the wind speeds into', + ) default = 3 - parser.add_argument('-ti', '--tick_interval', type=int, default=default, - help=(f'Interval (in percentage) for the ticks ' - f'on the radial axis. Default is {default}')) - parser.add_argument('-ac', '--assessment_color', type=json.loads, - default=None, - help=('dictionary lookup to override default ' - 'assessment to color')) + parser.add_argument( + '-ti', + '--tick_interval', + type=int, + default=default, + help=( + f'Interval (in percentage) for the ticks ' f'on the radial axis. Default is {default}' + ), + ) + parser.add_argument( + '-ac', + '--assessment_color', + type=json.loads, + default=None, + help=('dictionary lookup to override default ' 'assessment to color'), + ) default = False - parser.add_argument('-ao', '--assessment_overplot', - default=default, action='store_true', - help=(f'Option to overplot quality control colored ' - f'symbols over plotted data using ' - f'flag_assessment categories. Default is {default}')) - default = {'Incorrect': ['Bad', 'Incorrect'], - 'Suspect': ['Indeterminate', 'Suspect']} - parser.add_argument('-oc', '--overplot_category', type=json.loads, default=default, - help=(f'Look up to categorize assessments into groups. ' - f'This allows using multiple terms for the same ' - f'quality control level of failure. ' - f'Also allows adding more to the defaults. Default is {default}')) + parser.add_argument( + '-ao', + '--assessment_overplot', + default=default, + action='store_true', + help=( + f'Option to overplot quality control colored ' + f'symbols over plotted data using ' + f'flag_assessment categories. Default is {default}' + ), + ) + default = {'Incorrect': ['Bad', 'Incorrect'], 'Suspect': ['Indeterminate', 'Suspect']} + parser.add_argument( + '-oc', + '--overplot_category', + type=json.loads, + default=default, + help=( + f'Look up to categorize assessments into groups. ' + f'This allows using multiple terms for the same ' + f'quality control level of failure. ' + f'Also allows adding more to the defaults. Default is {default}' + ), + ) default = {'Incorrect': 'red', 'Suspect': 'orange'} - parser.add_argument('-co', '--category_color', type=json.loads, - default=default, - help=(f'Lookup to match overplot category color to ' - f'assessment grouping. Default is {default}')) - parser.add_argument('-flp', '--force_line_plot', default=False, - action='store_true', - help='Option to plot 2D data as 1D line plots') - parser.add_argument('-l', '--labels', nargs='+', default=False, - type=str, - help=('Option to overwrite the legend labels. ' - 'Must have same dimensions as number of ' - 'lines plottes.')) - parser.add_argument('-sy', '--secondary_y', default=False, action='store_true', - help='Option to plot on secondary y axis') + parser.add_argument( + '-co', + '--category_color', + type=json.loads, + default=default, + help=( + f'Lookup to match overplot category color to ' + f'assessment grouping. 
+            f'assessment grouping. Default is {default}'
+        ),
+    )
+    parser.add_argument(
+        '-flp',
+        '--force_line_plot',
+        default=False,
+        action='store_true',
+        help='Option to plot 2D data as 1D line plots',
+    )
+    parser.add_argument(
+        '-l',
+        '--labels',
+        nargs='+',
+        default=False,
+        type=str,
+        help=(
+            'Option to overwrite the legend labels. '
+            'Must have same dimensions as number of '
+            'lines plotted.'
+        ),
+    )
+    parser.add_argument(
+        '-sy',
+        '--secondary_y',
+        default=False,
+        action='store_true',
+        help='Option to plot on secondary y axis',
+    )
     if CARTOPY_AVAILABLE:
         default = ccrs.PlateCarree()
-        parser.add_argument('-prj', '--projection', type=str,
-                            default=default,
-                            help=f"Projection to use on plot. Default is {default}")
+        parser.add_argument(
+            '-prj',
+            '--projection',
+            type=str,
+            default=default,
+            help=f"Projection to use on plot. Default is {default}",
+        )
     default = 20
-    parser.add_argument('-bx', '--num_barb_x', type=int, default=default,
-                        help=f'Number of wind barbs to plot in the x axis. Default is {default}')
+    parser.add_argument(
+        '-bx',
+        '--num_barb_x',
+        type=int,
+        default=default,
+        help=f'Number of wind barbs to plot in the x axis. Default is {default}',
+    )
     default = 20
-    parser.add_argument('-by', '--num_barb_y', type=int, default=default,
-                        help=f"Number of wind barbs to plot in the y axis. Default is {default}")
+    parser.add_argument(
+        '-by',
+        '--num_barb_y',
+        type=int,
+        default=default,
+        help=f"Number of wind barbs to plot in the y axis. Default is {default}",
+    )
     default = 20
-    parser.add_argument('-tp', '--num_time_periods', type=int, default=default,
-                        help=f'Set how many time periods. Default is {default}')
-    parser.add_argument('-bn', '--bins', nargs='+', type=int, default=None,
-                        help='histogram bin boundaries to use')
-    parser.add_argument('-bf', '--bin_field', type=str, default=None,
-                        help=('name of the field that stores the '
-                              'bins for the spectra'))
-    parser.add_argument('-xb', '--x_bins', nargs='+', type=int, default=None,
-                        help='Histogram bin boundaries to use for x axis variable')
-    parser.add_argument('-yb', '--y_bins', nargs='+', type=int, default=None,
-                        help='Histogram bin boundaries to use for y axis variable')
-    parser.add_argument('-t', '--time', type=str, default=None,
-                        help='Time period to be plotted')
-    parser.add_argument('-sbf', '--sortby_field', type=str, default=None,
-                        help='Sort histograms by a given field parameter')
-    parser.add_argument('-sbb', '--sortby_bins', nargs='+', type=int,
-                        default=None,
-                        help='Bins to sort the histograms by')
+    parser.add_argument(
+        '-tp',
+        '--num_time_periods',
+        type=int,
+        default=default,
+        help=f'Set the number of time periods. Default is {default}',
+    )
+    parser.add_argument(
+        '-bn', '--bins', nargs='+', type=int, default=None, help='Histogram bin boundaries to use'
+    )
+    parser.add_argument(
+        '-bf',
+        '--bin_field',
+        type=str,
+        default=None,
+        help='Name of the field that stores the bins for the spectra',
+    )
+    parser.add_argument(
+        '-xb',
+        '--x_bins',
+        nargs='+',
+        type=int,
+        default=None,
+        help='Histogram bin boundaries to use for x axis variable',
+    )
+    parser.add_argument(
+        '-yb',
+        '--y_bins',
+        nargs='+',
+        type=int,
+        default=None,
+        help='Histogram bin boundaries to use for y axis variable',
+    )
+    parser.add_argument('-t', '--time', type=str, default=None, help='Time period to be plotted')
+    parser.add_argument(
+        '-sbf',
+        '--sortby_field',
+        type=str,
+        default=None,
+        help='Sort histograms by a given field parameter',
+    )
+    parser.add_argument(
+        '-sbb',
+        '--sortby_bins',
+        nargs='+',
+        type=int,
+        default=None,
+        help='Bins to sort the histograms by',
+    )
     default = 20
-    parser.add_argument('-nyl', '--num_y_levels', type=int, default=default,
-                        help=f'Number of levels in the y axis to use. Default is {default}')
-    parser.add_argument('-sk', '--sel_kwargs', type=json.loads, default=None,
-                        help=('The keyword arguments to pass into '
-                              ':py:func:`xarray.DataArray.sel`'))
-    parser.add_argument('-ik', '--isel_kwargs', type=json.loads, default=None,
-                        help=('The keyword arguments to pass into '
-                              ':py:func:`xarray.DataArray.sel`'))
+    parser.add_argument(
+        '-nyl',
+        '--num_y_levels',
+        type=int,
+        default=default,
+        help=f'Number of levels in the y axis to use. Default is {default}',
+    )
+    parser.add_argument(
+        '-sk',
+        '--sel_kwargs',
+        type=json.loads,
+        default=None,
+        help='The keyword arguments to pass into :py:func:`xarray.DataArray.sel`',
+    )
+    parser.add_argument(
+        '-ik',
+        '--isel_kwargs',
+        type=json.loads,
+        default=None,
+        help='The keyword arguments to pass into :py:func:`xarray.DataArray.isel`',
+    )
     default = 'cubic'
-    parser.add_argument('-fn', '--function', type=str, default=default,
-                        help=(f'Defaults to cubic function for interpolation. '
-                              f'See scipy.interpolate.Rbf for additional options. '
-                              f'Default is {default}'))
+    parser.add_argument(
+        '-fn',
+        '--function',
+        type=str,
+        default=default,
+        help=(
+            f'Defaults to cubic function for interpolation. '
+            f'See scipy.interpolate.Rbf for additional options. '
+            f'Default is {default}'
+        ),
+    )
     default = 0.1
-    parser.add_argument('-gb', '--grid_buffer', type=float, default=default,
-                        help=f'Buffer to apply to grid. Default is {default}')
+    parser.add_argument(
+        '-gb',
+        '--grid_buffer',
+        type=float,
+        default=default,
+        help=f'Buffer to apply to grid. Default is {default}',
+    )
     default = (0.01, 0.01)
-    parser.add_argument('-gd', '--grid_delta', nargs='+',
-                        type=float, default=default,
-                        help=f'X and Y deltas for creating grid. Default is {default}')
-    parser.add_argument('-fg', '--figsize', nargs='+', type=float,
-                        default=None,
-                        help='Width and height in inches of figure')
+    parser.add_argument(
+        '-gd',
+        '--grid_delta',
+        nargs='+',
+        type=float,
+        default=default,
+        help=f'X and Y deltas for creating grid. Default is {default}',
+    )
+    parser.add_argument(
+        '-fg',
+        '--figsize',
+        nargs='+',
+        type=float,
+        default=None,
+        help='Width and height in inches of figure',
+    )
     default = 'white'
-    parser.add_argument('-tc', '--text_color', type=str, default=default,
-                        help=f"Color of text. Default is '{default}'")
-    parser.add_argument('-kwargs', type=json.loads, default=dict(),
-                        help='keyword arguments to use in plotting function')
-    parser.add_argument('-pk', '--plot_kwargs', type=json.loads, default=dict(),
-                        help=("Additional keyword arguments to pass "
-                              "into MetPy's SkewT.plot"))
-    parser.add_argument('-pbk', '--plot_barbs_kwargs', type=json.loads,
-                        default=dict(),
-                        help=("Additional keyword arguments to pass "
-                              "into MetPy's SkewT.plot_barbs"))
+    parser.add_argument(
+        '-tc',
+        '--text_color',
+        type=str,
+        default=default,
+        help=f"Color of text. Default is '{default}'",
+    )
+    parser.add_argument(
+        '-kwargs',
+        type=json.loads,
+        default=dict(),
+        help='keyword arguments to use in plotting function',
+    )
+    parser.add_argument(
+        '-pk',
+        '--plot_kwargs',
+        type=json.loads,
+        default=dict(),
+        help="Additional keyword arguments to pass into MetPy's SkewT.plot",
+    )
+    parser.add_argument(
+        '-pbk',
+        '--plot_barbs_kwargs',
+        type=json.loads,
+        default=dict(),
+        help="Additional keyword arguments to pass into MetPy's SkewT.plot_barbs",
+    )
     default = True
-    parser.add_argument('-cu', '--cleanup', default=default, action='store_false',
-                        help=f'Turn off standard methods for obj cleanup. Default is {default}')
-    parser.add_argument('-gl', '--gridlines', default=False, action='store_true',
-                        help='Use latitude and lingitude gridlines.')
-    parser.add_argument('-cl', '--coastlines', default=False, action='store_true',
-                        help='Plot coastlines on geographical map')
-    parser.add_argument('-bg', '--background', default=False, action='store_true',
-                        help='Plot a stock image background')
-    parser.add_argument('-nan', '--add_nan', default=False, action='store_true',
-                        help='Fill in data gaps with NaNs')
-    parser.add_argument('-dn', '--day_night', default=False, action='store_true',
-                        help=("Fill in color coded background according "
-                              "to time of day."))
-    parser.add_argument('-yr', '--set_yrange', default=None, nargs=2,
-                        help=("Set the yrange for the specific plot"))
-    parser.add_argument('-iya', '--invert_y_axis', default=False,
-                        action='store_true',
-                        help='Invert y axis')
-    parser.add_argument('-sp', '--show_parcel', default=False, action='store_true',
-                        help='set to true to plot the parcel path.')
-    parser.add_argument('-cape', '--shade_cape', default=False,
-                        action='store_true',
-                        help='set to true to shade regions of cape.')
-    parser.add_argument('-cin', '--shade_cin', default=False, action='store_true',
-                        help='set to true to shade regions of cin.')
-    parser.add_argument('-d', '--density', default=False, action='store_true',
-                        help='Plot a p.d.f. instead of a frequency histogram')
-    parser.add_argument('-m', '--mesh', default=False, action='store_true',
-                        help=('Set to True to interpolate u and v to '
-                              'grid and create wind barbs'))
-    parser.add_argument('-uv', '--from_u_and_v', default=False, action='store_true',
-                        help='Create SkewTPLot with u and v wind')
-    parser.add_argument('-sd', '--from_spd_and_dir', default=False, action='store_true',
-                        help='Create SkewTPlot with wind speed and direction')
-    parser.add_argument('-px', '--plot_xsection', default=False, action='store_true',
-                        help='plots a cross section whose x and y coordinates')
-    parser.add_argument('-pxm', '--xsection_map', default=False, action='store_true',
-                        help='plots a cross section of 2D data on a geographical map')
-    parser.add_argument('-p', '--plot', default=False, action='store_true',
-                        help='Makes a time series plot')
-    parser.add_argument('-mp', '--multi_panel', default=False,
-                        action='store_true',
-                        help='Makes a 2 panel timeseries plot')
-    parser.add_argument('-qc', '--qc', default=False, action='store_true',
-                        help='Create time series plot of embedded quality control values')
-    parser.add_argument('-fb', '--fill_between', default=False, action='store_true',
-                        help='makes a fill betweem plot based on matplotlib')
-    parser.add_argument('-bsd', '--barbs_spd_dir', default=False, action='store_true',
-                        help=('Makes time series plot of wind barbs '
-                              'using wind speed and dir.'))
-    parser.add_argument('-buv', '--barbs_u_v', default=False, action='store_true',
-                        help=('Makes time series plot of wind barbs '
-                              'using u and v wind components.'))
-    parser.add_argument('-pxs', '--xsection_from_1d', default=False,
-                        action='store_true',
-                        help='Will plot a time-height cross section from 1D dataset')
-    parser.add_argument('-ths', '--time_height_scatter',
-                        default=False, action='store_true',
-                        help='Create a scatter time series plot')
-    parser.add_argument('-sbg', '--stacked_bar_graph',
-                        default=False, action='store_true',
-                        help='Create stacked bar graph histogram')
-    parser.add_argument('-psd', '--size_dist', default=False, action='store_true',
-                        help='Plots a stairstep plot of size distribution')
-    parser.add_argument('-sg', '--stairstep', default=False, action='store_true',
-                        help='Plots stairstep plot of a histogram')
-    parser.add_argument('-hm', '--heatmap', default=False, action='store_true',
-                        help='Plot a heatmap histogram from 2 variables')
-    parser.add_argument('-cc', '--create_contour', default=False, action='store_true',
-                        help='Extracts, grids, and creates a contour plot')
-    parser.add_argument('-cf', '--contourf', default=False, action='store_true',
-                        help=('Base function for filled contours if user '
-                              'already has data gridded'))
-    parser.add_argument('-ct', '--plot_contour', default=False, action='store_true',
-                        help=('Base function for contours if user '
-                              'already has data gridded'))
-    parser.add_argument('-vsd', '--vectors_spd_dir', default=False, action='store_true',
-                        help='Extracts, grids, and creates a contour plot.')
-    parser.add_argument('-b', '--barbs', default=False, action='store_true',
-                        help='Base function for wind barbs.')
-    parser.add_argument('-ps', '--plot_station', default=False, action='store_true',
-                        help='Extracts, grids, and creates a contour plot')
+    parser.add_argument(
+        '-cu',
+        '--cleanup',
+        default=default,
+        action='store_false',
+        help=f'Turn off standard methods for obj cleanup. Default is {default}',
+    )
+    parser.add_argument(
+        '-gl',
+        '--gridlines',
+        default=False,
+        action='store_true',
+        help='Use latitude and longitude gridlines.',
+    )
+    parser.add_argument(
+        '-cl',
+        '--coastlines',
+        default=False,
+        action='store_true',
+        help='Plot coastlines on geographical map',
+    )
+    parser.add_argument(
+        '-bg',
+        '--background',
+        default=False,
+        action='store_true',
+        help='Plot a stock image background',
+    )
+    parser.add_argument(
+        '-nan', '--add_nan', default=False, action='store_true', help='Fill in data gaps with NaNs'
+    )
+    parser.add_argument(
+        '-dn',
+        '--day_night',
+        default=False,
+        action='store_true',
+        help="Fill in color-coded background according to time of day.",
+    )
+    parser.add_argument(
+        '-yr', '--set_yrange', default=None, nargs=2, help='Set the yrange for the specific plot'
+    )
+    parser.add_argument(
+        '-iya', '--invert_y_axis', default=False, action='store_true', help='Invert y axis'
+    )
+    parser.add_argument(
+        '-sp',
+        '--show_parcel',
+        default=False,
+        action='store_true',
+        help='Set to true to plot the parcel path.',
+    )
+    parser.add_argument(
+        '-cape',
+        '--shade_cape',
+        default=False,
+        action='store_true',
+        help='Set to true to shade regions of cape.',
+    )
+    parser.add_argument(
+        '-cin',
+        '--shade_cin',
+        default=False,
+        action='store_true',
+        help='Set to true to shade regions of cin.',
+    )
+    parser.add_argument(
+        '-d',
+        '--density',
+        default=False,
+        action='store_true',
+        help='Plot a p.d.f. instead of a frequency histogram',
+    )
+    parser.add_argument(
+        '-m',
+        '--mesh',
+        default=False,
+        action='store_true',
+        help='Set to True to interpolate u and v to grid and create wind barbs',
+    )
+    parser.add_argument(
+        '-uv',
+        '--from_u_and_v',
+        default=False,
+        action='store_true',
+        help='Create SkewTPlot with u and v wind',
+    )
+    parser.add_argument(
+        '-sd',
+        '--from_spd_and_dir',
+        default=False,
+        action='store_true',
+        help='Create SkewTPlot with wind speed and direction',
+    )
+    parser.add_argument(
+        '-px',
+        '--plot_xsection',
+        default=False,
+        action='store_true',
+        help='plots a cross section whose x and y coordinates',
+    )
+    parser.add_argument(
+        '-pxm',
+        '--xsection_map',
+        default=False,
+        action='store_true',
+        help='plots a cross section of 2D data on a geographical map',
+    )
+    parser.add_argument(
+        '-p', '--plot', default=False, action='store_true', help='Makes a time series plot'
+    )
+    parser.add_argument(
+        '-mp',
+        '--multi_panel',
+        default=False,
+        action='store_true',
+        help='Makes a 2 panel timeseries plot',
+    )
+    parser.add_argument(
+        '-qc',
+        '--qc',
+        default=False,
+        action='store_true',
+        help='Create time series plot of embedded quality control values',
+    )
+    parser.add_argument(
+        '-fb',
+        '--fill_between',
+        default=False,
+        action='store_true',
+        help='Makes a fill between plot based on matplotlib',
+    )
+    parser.add_argument(
+        '-bsd',
+        '--barbs_spd_dir',
+        default=False,
+        action='store_true',
+        help='Makes time series plot of wind barbs using wind speed and dir.',
+    )
+    parser.add_argument(
+        '-buv',
+        '--barbs_u_v',
+        default=False,
+        action='store_true',
+        help='Makes time series plot of wind barbs using u and v wind components.',
+    )
+    parser.add_argument(
+        '-pxs',
+        '--xsection_from_1d',
+        default=False,
+        action='store_true',
+        help='Will plot a time-height cross section from a 1D dataset',
+    )
+    parser.add_argument(
+        '-ths',
+        '--time_height_scatter',
+        default=False,
+        action='store_true',
+        help='Create a scatter time series plot',
+    )
+    parser.add_argument(
+        '-sbg',
+        '--stacked_bar_graph',
+        default=False,
+        action='store_true',
+        help='Create stacked bar graph histogram',
+    )
+    parser.add_argument(
+        '-psd',
+        '--size_dist',
+        default=False,
+        action='store_true',
+        help='Plots a stairstep plot of size distribution',
+    )
+    parser.add_argument(
+        '-sg',
+        '--stairstep',
+        default=False,
+        action='store_true',
+        help='Plots stairstep plot of a histogram',
+    )
+    parser.add_argument(
+        '-hm',
+        '--heatmap',
+        default=False,
+        action='store_true',
+        help='Plot a heatmap histogram from 2 variables',
+    )
+    parser.add_argument(
+        '-cc',
+        '--create_contour',
+        default=False,
+        action='store_true',
+        help='Extracts, grids, and creates a contour plot',
+    )
+    parser.add_argument(
+        '-cf',
+        '--contourf',
+        default=False,
+        action='store_true',
+        help='Base function for filled contours if user already has data gridded',
+    )
+    parser.add_argument(
+        '-ct',
+        '--plot_contour',
+        default=False,
+        action='store_true',
+        help='Base function for contours if user already has data gridded',
+    )
+    parser.add_argument(
+        '-vsd',
+        '--vectors_spd_dir',
+        default=False,
+        action='store_true',
+        help='Extracts, grids, and creates a contour plot.',
+    )
+    parser.add_argument(
+        '-b', '--barbs', default=False, action='store_true', help='Base function for wind barbs.'
+    )
+    parser.add_argument(
+        '-ps',
+        '--plot_station',
+        default=False,
+        action='store_true',
+        help='Extracts, grids, and creates a contour plot',
+    )
     # The mutually exclusive group; exactly one option is required
     group = parser.add_mutually_exclusive_group(required=True)
-    group.add_argument('-gp', '--geodisplay', dest='action', action='store_const',
-                       const=geodisplay, help='Set to genereate a geographic plot')
-    group.add_argument('-skt', '--skewt', dest='action', action='store_const',
-                       const=skewt, help='Set to genereate a skew-t plot')
-    group.add_argument('-xs', '--xsection', dest='action', action='store_const',
-                       const=xsection, help='Set to genereate a XSection plot')
-    group.add_argument('-wr', '--wind_rose', dest='action', action='store_const',
-                       const=wind_rose, help='Set to genereate a wind rose plot')
-    group.add_argument('-ts', '--timeseries', dest='action', action='store_const',
-                       const=timeseries, help='Set to genereate a timeseries plot')
-    group.add_argument('-c', '--contour', dest='action', action='store_const',
-                       const=contour, help='Set to genereate a contour plot')
-    group.add_argument('-hs', '--histogram', dest='action', action='store_const',
-                       const=histogram, help='Set to genereate a histogram plot')
+    group.add_argument(
+        '-gp',
+        '--geodisplay',
+        dest='action',
+        action='store_const',
+        const=geodisplay,
+        help='Set to generate a geographic plot',
+    )
+    group.add_argument(
+        '-skt',
+        '--skewt',
+        dest='action',
+        action='store_const',
+        const=skewt,
+        help='Set to generate a skew-t plot',
+    )
+    group.add_argument(
+        '-xs',
+        '--xsection',
+        dest='action',
+        action='store_const',
+        const=xsection,
+        help='Set to generate an XSection plot',
+    )
+    group.add_argument(
+        '-wr',
+        '--wind_rose',
+        dest='action',
+        action='store_const',
+        const=wind_rose,
+        help='Set to generate a wind rose plot',
+    )
+    group.add_argument(
+        '-ts',
+        '--timeseries',
+        dest='action',
+        action='store_const',
+        const=timeseries,
+        help='Set to generate a timeseries plot',
+    )
+    group.add_argument(
+        '-c',
+        '--contour',
+        dest='action',
+        action='store_const',
+        const=contour,
+        help='Set to generate a contour plot',
+    )
+    group.add_argument(
+        '-hs',
+        '--histogram',
+        dest='action',
+        action='store_const',
+        const=histogram,
+        help='Set to generate a histogram plot',
+    )

     args = parser.parse_args()
diff --git a/tests/io/test_hysplit.py b/tests/io/test_hysplit.py
index 6a889a2f42..162fe49a1c 100644
--- a/tests/io/test_hysplit.py
+++ b/tests/io/test_hysplit.py
@@ -1,5 +1,4 @@
 import act
-import matplotlib.pyplot as plt
 
 from act.tests import sample_files
 
diff --git a/tests/utils/test_io_utils.py b/tests/utils/test_io_utils.py
index 89cfee6ef6..44bb2d1c01 100644
--- a/tests/utils/test_io_utils.py
+++ b/tests/utils/test_io_utils.py
@@ -1,22 +1,18 @@
-import glob
-import os
 import random
 import shutil
 import tempfile
-from os import PathLike, chdir, getcwd
+from os import PathLike, chdir
 from pathlib import Path
 from string import ascii_letters
 
 import numpy as np
 import pytest
-from arm_test_data import locate as test_data_locate
 
 import act
 from act.tests import sample_files
 
 try:
     import moviepy.video.io.ImageSequenceClip
-
     MOVIEPY_AVAILABLE = True
 except ImportError:
     MOVIEPY_AVAILABLE = False
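Note on the mutually exclusive group reformatted above: each flag stores a plotting function into the same `args.action` slot via `action='store_const'`, so the caller can dispatch with a single call. A minimal standalone sketch of that pattern follows; the `--file` option and the `timeseries` body are illustrative placeholders, not part of this patch or of the script's actual plotting routines.

    import argparse

    def timeseries(args):
        # Placeholder: the real script would hand off to a display class here.
        print(f'would plot a timeseries from {args.file}')

    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--file', type=str, default=None, help='File to plot')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-ts', '--timeseries', dest='action', action='store_const',
                       const=timeseries, help='Set to generate a timeseries plot')

    args = parser.parse_args(['-ts', '-f', 'sample.nc'])
    args.action(args)  # exactly one const was stored, so this calls timeseries(args)

Because the group is `required=True` and all members share `dest='action'`, argparse guarantees `args.action` holds exactly one plotting function, which is why the script can end with a bare `args.action(args)`-style dispatch instead of a chain of if/elif checks.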