diff --git a/docs/source/apis.rst b/docs/source/apis.rst new file mode 100644 index 0000000..74e8255 --- /dev/null +++ b/docs/source/apis.rst @@ -0,0 +1,19 @@ +API References +============== + +.. include:: versions.txt + +.. toctree:: + :titlesonly: + :hidden: + + PyNHD + PyGeoHydro + Py3DEP + PyDaymet + PyGridMET + PyNLDAS2 + HydroSignatures + AsyncRetriever + PyGeoOGC + PyGeoUtils \ No newline at end of file diff --git a/docs/source/autoapi/async_retriever/_utils/index.rst b/docs/source/autoapi/async_retriever/_utils/index.rst deleted file mode 100644 index a7a89b5..0000000 --- a/docs/source/autoapi/async_retriever/_utils/index.rst +++ /dev/null @@ -1,69 +0,0 @@ -:orphan: - -:py:mod:`async_retriever._utils` -================================ - -.. py:module:: async_retriever._utils - -.. autoapi-nested-parse:: - - Core async functions. - - - -Module Contents ---------------- - -.. py:class:: BaseRetriever(urls, file_paths = None, read_method = None, request_kwds = None, request_method = 'GET', cache_name = None, ssl = None) - - - Base class for async retriever. - - .. py:method:: generate_requests(urls, request_kwds, file_paths) - :staticmethod: - - Generate urls and keywords. - - - -.. py:function:: create_cachefile(db_name = None) - - Create a cache folder in the current working directory. - - -.. py:function:: delete_url(url, method, cache_name, **kwargs) - :async: - - Delete cached response associated with ``url``. - - -.. py:function:: get_event_loop() - - Create an event loop. - - -.. py:function:: retriever(uid, url, s_kwds, session, read_type, r_kwds, raise_status) - :async: - - Create an async request and return the response as binary. 
- - :Parameters: * **uid** (:class:`int`) -- ID of the URL for sorting after returning the results - * **url** (:class:`str`) -- URL to be retrieved - * **s_kwds** (:class:`dict`) -- Arguments to be passed to requests - * **session** (:class:`ClientSession`) -- A ClientSession for sending the request - * **read_type** (:class:`str`) -- Return response as ``text``, ``bytes``, or ``json``. - * **r_kwds** (:class:`dict`) -- Keywords to pass to the response read function. - It is ``{"content_type": None}`` if ``read`` is ``json`` - else an empty ``dict``. - * **raise_status** (:class:`bool`) -- Raise an exception if the response status is not 200. If - ``False`` return ``None``. - - :returns: :class:`bytes` -- The retrieved response as binary. - - -.. py:function:: stream_session(url, s_kwds, session, filepath, chunk_size = None) - :async: - - Stream the response to a file. - - diff --git a/docs/source/autoapi/async_retriever/async_retriever/index.rst b/docs/source/autoapi/async_retriever/async_retriever/index.rst deleted file mode 100644 index 0dcccb5..0000000 --- a/docs/source/autoapi/async_retriever/async_retriever/index.rst +++ /dev/null @@ -1,162 +0,0 @@ -async_retriever.async_retriever -=============================== - -.. py:module:: async_retriever.async_retriever - -.. autoapi-nested-parse:: - - Core async functions. - - - - - -Module Contents ---------------- - -.. py:function:: delete_url_cache(url, request_method = 'get', cache_name = None, **kwargs) - - Delete cached response associated with ``url``, along with its history (if applicable). - - :Parameters: * **url** (:class:`str`) -- URL to be deleted from the cache - * **request_method** (:class:`str`, *optional*) -- HTTP request method to be deleted from the cache, defaults to ``GET``. - * **cache_name** (:class:`str`, *optional*) -- Path to a file for caching the session, defaults to - ``./cache/aiohttp_cache.sqlite``. 
- * **kwargs** (:class:`dict`, *optional*) -- Keywords to pass to the ``cache.delete_url()``. - - -.. py:function:: retrieve(urls, read_method, request_kwds = None, request_method = 'get', limit_per_host = 5, cache_name = None, timeout = 5, expire_after = EXPIRE_AFTER, ssl = True, disable = False, raise_status = True) - - Send async requests. - - :Parameters: * **urls** (:class:`list` of :class:`str`) -- List of URLs. - * **read_method** (:class:`str`) -- Method for returning the request; ``binary``, ``json``, and ``text``. - * **request_kwds** (:class:`list` of :class:`dict`, *optional*) -- List of requests keywords corresponding to input URLs (1 on 1 mapping), - defaults to ``None``. For example, ``[{"params": {...}, "headers": {...}}, ...]``. - * **request_method** (:class:`str`, *optional*) -- Request type; ``GET`` (``get``) or ``POST`` (``post``). Defaults to ``GET``. - * **limit_per_host** (:class:`int`, *optional*) -- Maximum number of simultaneous connections per host, defaults to 5. - * **cache_name** (:class:`str`, *optional*) -- Path to a file for caching the session, defaults to ``./cache/aiohttp_cache.sqlite``. - * **timeout** (:class:`int`, *optional*) -- Requests timeout in seconds, defaults to 5. - * **expire_after** (:class:`int`, *optional*) -- Expiration time for response caching in seconds, defaults to 2592000 (one week). - * **ssl** (:class:`bool` or :class:`SSLContext`, *optional*) -- SSLContext to use for the connection, defaults to None. Set to False to disable - SSL certification verification. - * **disable** (:class:`bool`, *optional*) -- If ``True`` temporarily disable caching requests and get new responses - from the server, defaults to False. - * **raise_status** (:class:`bool`, *optional*) -- Raise an exception if the response status is not 200. If - ``False`` return ``None``. Defaults to ``True``. - - :returns: :class:`list` -- List of responses in the order of input URLs. - - .. 
rubric:: Examples - - >>> import async_retriever as ar - >>> stations = ["01646500", "08072300", "11073495"] - >>> url = "https://waterservices.usgs.gov/nwis/site" - >>> urls, kwds = zip( - ... *[ - ... (url, {"params": {"format": "rdb", "sites": s, "siteStatus": "all"}}) - ... for s in stations - ... ] - ... ) - >>> resp = ar.retrieve(urls, "text", request_kwds=kwds) - >>> resp[0].split("\n")[-2].split("\t")[1] - '01646500' - - -.. py:function:: retrieve_binary(urls, request_kwds = None, request_method = 'get', limit_per_host = 5, cache_name = None, timeout = 5, expire_after = EXPIRE_AFTER, ssl = True, disable = False, raise_status = True) - - Send async requests and get the response as ``bytes``. - - :Parameters: * **urls** (:class:`list` of :class:`str`) -- List of URLs. - * **request_kwds** (:class:`list` of :class:`dict`, *optional*) -- List of requests keywords corresponding to input URLs (1 on 1 mapping), - defaults to ``None``. For example, ``[{"params": {...}, "headers": {...}}, ...]``. - * **request_method** (:class:`str`, *optional*) -- Request type; ``GET`` (``get``) or ``POST`` (``post``). Defaults to ``GET``. - * **limit_per_host** (:class:`int`, *optional*) -- Maximum number of simultaneous connections per host, defaults to 5. - * **cache_name** (:class:`str`, *optional*) -- Path to a file for caching the session, defaults to ``./cache/aiohttp_cache.sqlite``. - * **timeout** (:class:`int`, *optional*) -- Requests timeout in seconds, defaults to 5. - * **expire_after** (:class:`int`, *optional*) -- Expiration time for response caching in seconds, defaults to 2592000 (one week). - * **ssl** (:class:`bool` or :class:`SSLContext`, *optional*) -- SSLContext to use for the connection, defaults to None. Set to False to disable - SSL certification verification. - * **disable** (:class:`bool`, *optional*) -- If ``True`` temporarily disable caching requests and get new responses - from the server, defaults to False. 
- * **raise_status** (:class:`bool`, *optional*) -- Raise an exception if the response status is not 200. If - ``False`` return ``None``. Defaults to ``True``. - - :returns: :class:`bytes` -- List of responses in the order of input URLs. - - -.. py:function:: retrieve_json(urls, request_kwds = None, request_method = 'get', limit_per_host = 5, cache_name = None, timeout = 5, expire_after = EXPIRE_AFTER, ssl = True, disable = False, raise_status = True) - - Send async requests and get the response as ``json``. - - :Parameters: * **urls** (:class:`list` of :class:`str`) -- List of URLs. - * **request_kwds** (:class:`list` of :class:`dict`, *optional*) -- List of requests keywords corresponding to input URLs (1 on 1 mapping), - defaults to ``None``. For example, ``[{"params": {...}, "headers": {...}}, ...]``. - * **request_method** (:class:`str`, *optional*) -- Request type; ``GET`` (``get``) or ``POST`` (``post``). Defaults to ``GET``. - * **limit_per_host** (:class:`int`, *optional*) -- Maximum number of simultaneous connections per host, defaults to 5. - * **cache_name** (:class:`str`, *optional*) -- Path to a file for caching the session, defaults to ``./cache/aiohttp_cache.sqlite``. - * **timeout** (:class:`int`, *optional*) -- Requests timeout in seconds, defaults to 5. - * **expire_after** (:class:`int`, *optional*) -- Expiration time for response caching in seconds, defaults to 2592000 (one week). - * **ssl** (:class:`bool` or :class:`SSLContext`, *optional*) -- SSLContext to use for the connection, defaults to None. Set to False to disable - SSL certification verification. - * **disable** (:class:`bool`, *optional*) -- If ``True`` temporarily disable caching requests and get new responses - from the server, defaults to False. - * **raise_status** (:class:`bool`, *optional*) -- Raise an exception if the response status is not 200. If - ``False`` return ``None``. Defaults to ``True``. - - :returns: :class:`dict` -- List of responses in the order of input URLs. 
- - .. rubric:: Examples - - >>> import async_retriever as ar - >>> urls = ["https://labs.waterdata.usgs.gov/api/nldi/linked-data/comid/position"] - >>> kwds = [ - ... { - ... "params": { - ... "f": "json", - ... "coords": "POINT(-68.325 45.0369)", - ... }, - ... }, - ... ] - >>> r = ar.retrieve_json(urls, kwds) - >>> print(r[0]["features"][0]["properties"]["identifier"]) - 2675320 - - -.. py:function:: retrieve_text(urls, request_kwds = None, request_method = 'get', limit_per_host = 5, cache_name = None, timeout = 5, expire_after = EXPIRE_AFTER, ssl = True, disable = False, raise_status = True) - - Send async requests and get the response as ``text``. - - :Parameters: * **urls** (:class:`list` of :class:`str`) -- List of URLs. - * **request_kwds** (:class:`list` of :class:`dict`, *optional*) -- List of requests keywords corresponding to input URLs (1 on 1 mapping), - defaults to ``None``. For example, ``[{"params": {...}, "headers": {...}}, ...]``. - * **request_method** (:class:`str`, *optional*) -- Request type; ``GET`` (``get``) or ``POST`` (``post``). Defaults to ``GET``. - * **limit_per_host** (:class:`int`, *optional*) -- Maximum number of simultaneous connections per host, defaults to 5. - * **cache_name** (:class:`str`, *optional*) -- Path to a file for caching the session, defaults to ``./cache/aiohttp_cache.sqlite``. - * **timeout** (:class:`int`, *optional*) -- Requests timeout in seconds in seconds, defaults to 5. - * **expire_after** (:class:`int`, *optional*) -- Expiration time for response caching in seconds, defaults to 2592000 (one week). - * **ssl** (:class:`bool` or :class:`SSLContext`, *optional*) -- SSLContext to use for the connection, defaults to None. Set to False to disable - SSL certification verification. - * **disable** (:class:`bool`, *optional*) -- If ``True`` temporarily disable caching requests and get new responses - from the server, defaults to False. 
- * **raise_status** (:class:`bool`, *optional*) -- Raise an exception if the response status is not 200. If - ``False`` return ``None``. Defaults to ``True``. - - :returns: :class:`list` -- List of responses in the order of input URLs. - - .. rubric:: Examples - - >>> import async_retriever as ar - >>> stations = ["01646500", "08072300", "11073495"] - >>> url = "https://waterservices.usgs.gov/nwis/site" - >>> urls, kwds = zip( - ... *[ - ... (url, {"params": {"format": "rdb", "sites": s, "siteStatus": "all"}}) - ... for s in stations - ... ] - ... ) - >>> resp = ar.retrieve_text(urls, kwds) - >>> resp[0].split("\n")[-2].split("\t")[1] - '01646500' - - diff --git a/docs/source/autoapi/async_retriever/index.rst b/docs/source/autoapi/async_retriever/index.rst deleted file mode 100644 index cd717fb..0000000 --- a/docs/source/autoapi/async_retriever/index.rst +++ /dev/null @@ -1,21 +0,0 @@ -async_retriever -=============== - -.. py:module:: async_retriever - -.. autoapi-nested-parse:: - - Top-level package. - - - -Submodules ----------- - -.. toctree:: - :maxdepth: 1 - - /autoapi/async_retriever/async_retriever/index - /autoapi/async_retriever/streaming/index - - diff --git a/docs/source/autoapi/async_retriever/streaming/index.rst b/docs/source/autoapi/async_retriever/streaming/index.rst deleted file mode 100644 index 4185af1..0000000 --- a/docs/source/autoapi/async_retriever/streaming/index.rst +++ /dev/null @@ -1,36 +0,0 @@ -async_retriever.streaming -========================= - -.. py:module:: async_retriever.streaming - -.. autoapi-nested-parse:: - - Download multiple files concurrently by streaming their content to disk. - - - - - -Module Contents ---------------- - -.. py:function:: stream_write(urls, file_paths, request_method = 'get', ssl = True, chunk_size = CHUNK_SIZE, limit_per_host = 5) - - Download multiple files concurrently by streaming their content to disk. - - :Parameters: * **urls** (:class:`Sequence[str]`) -- List of URLs to download. 
- * **file_paths** (:class:`Sequence[Path]`) -- List of file paths to save the downloaded content. - * **request_method** (``{"get", "post"}``, *optional*) -- HTTP method to use (i.e., ``get`` or ``post``), by default ``get``. - * **ssl** (:class:`bool` or :class:`ssl.SSLContext`, *optional*) -- Whether to verify SSL certificates, by default True. Also, - an SSLContext object can be passed to customize - * **chunk_size** (:class:`int`, *optional*) -- Size of each chunk in bytes, by default 1 MB. - * **limit_per_host** (:class:`int`, *optional*) -- Maximum simultaneous connections per host, by default 5. - - .. rubric:: Examples - - >>> import tempfile - >>> url = "https://freetestdata.com/wp-content/uploads/2021/09/Free_Test_Data_500KB_CSV-1.csv" - >>> with tempfile.NamedTemporaryFile(dir=".") as temp: - ... stream_write([url], [temp.name]) - - diff --git a/docs/source/autoapi/hydrosignatures/baseflow/index.rst b/docs/source/autoapi/hydrosignatures/baseflow/index.rst deleted file mode 100644 index d0dbfff..0000000 --- a/docs/source/autoapi/hydrosignatures/baseflow/index.rst +++ /dev/null @@ -1,80 +0,0 @@ -hydrosignatures.baseflow -======================== - -.. py:module:: hydrosignatures.baseflow - -.. autoapi-nested-parse:: - - Function for computing hydrologic signature. - - - - - -Module Contents ---------------- - -.. py:function:: baseflow(discharge, alpha = 0.925, n_passes = 1, pad_width = 10) - - Extract baseflow using the Lyne and Hollick filter (Ladson et al., 2013). - - :Parameters: * **discharge** (:class:`numpy.ndarray` or :class:`pandas.DataFrame` or :class:`pandas.Series` or :class:`xarray.DataArray`) -- Discharge time series that must not have any missing values. It can also be a 2D array - where each row is a time series. - * **n_passes** (:class:`int`, *optional*) -- Number of filter passes, defaults to 1. - * **alpha** (:class:`float`, *optional*) -- Filter parameter that must be between 0 and 1, defaults to 0.925. 
- * **pad_width** (:class:`int`, *optional*) -- Padding width for extending the data from both ends to address the warm up issue. - - :returns: :class:`numpy.ndarray` or :class:`pandas.DataFrame` or :class:`pandas.Series` or :class:`xarray.DataArray` -- Same discharge input array-like but values replaced with computed baseflow values. - - -.. py:function:: baseflow_index(discharge, alpha = 0.925, n_passes = 3, pad_width = 10) - - Compute the baseflow index using the Lyne and Hollick filter (Ladson et al., 2013). - - :Parameters: * **discharge** (:class:`numpy.ndarray` or :class:`pandas.DataFrame` or :class:`pandas.Series` or :class:`xarray.DataArray`) -- Discharge time series that must not have any missing values. It can also be a 2D array - where each row is a time series. - * **n_passes** (:class:`int`, *optional*) -- Number of filter passes, defaults to 3. It must be an odd number greater than 3. - * **alpha** (:class:`float`, *optional*) -- Filter parameter that must be between 0 and 1, defaults to 0.925. - * **pad_width** (:class:`int`, *optional*) -- Padding width for extending the data from both ends to address the warm up issue. - - :returns: :class:`numpy.float64` -- The baseflow index. - - -.. py:function:: baseflow_recession(streamflow, freq = 1.0, recession_length = 15, n_start = 0, eps = 0, start_of_recession = 'baseflow', fit_method = 'nonparametric_analytic', lyne_hollick_smoothing = 0.925) - - Calculate baseflow recession constant and master recession curve. - - .. rubric:: Notes - - This function is ported from the TOSSH Matlab toolbox, which is based on the - following publication: - - Gnann, S.J., Coxon, G., Woods, R.A., Howden, N.J.K., McMillan H.K., 2021. - TOSSH: A Toolbox for Streamflow Signatures in Hydrology. - Environmental Modelling & Software. - https://doi.org/10.1016/j.envsoft.2021.104983 - - This function calculates baseflow recession constant assuming exponential - recession behaviour (Safeeq et al., 2013). 
Master recession curve (MRC) is - constructed using the adapted matching strip method (Posavec et al., - 2006). - - According to Safeeq et al. (2013), :math:`K < 0.065` represent groundwater - dominated slow-draining systems, :math:`K >= 0.065` represent shallow subsurface - flow dominated fast draining systems. - - :Parameters: * **streamflow** (:class:`numpy.ndarray`) -- Streamflow as a 1D array. - * **freq** (:class:`float`, *optional*) -- Frequency of steamflow in number of days. Default is 1, i.e., daily streamflow. - * **recession_length** (:class:`int`, *optional*) -- Minimum length of recessions [days]. Default is 15. - * **n_start** (:class:`int`, *optional*) -- Days to be removed after start of recession. Default is 0. - * **eps** (:class:`float`, *optional*) -- Allowed increase in flow during recession period. Default is 0. - * **start_of_recession** (``{'baseflow', 'peak'}``, *optional*) -- Define start of recession. Default is 'baseflow'. - * **fit_method** (``{'nonparametric_analytic', 'exponential'}``, *optional*) -- Method to fit mrc. Default is 'nonparametric_analytic'. - * **lyne_hollick_smoothing** (:class:`float`, *optional*) -- Smoothing parameter of Lyne-Hollick filter. Default is 0.925. - - :returns: * **mrc** (:class:`numpy.ndarray`) -- Master Recession Curve as 2D array of [time, flow]. - * **bf_recession_k** (:class:`float`) -- Baseflow Recession Constant [1/day]. - - :raises ValueError: If no recession segments are found or if a complex BaseflowRecessionK is calculated. - - diff --git a/docs/source/autoapi/hydrosignatures/hydrosignatures/index.rst b/docs/source/autoapi/hydrosignatures/hydrosignatures/index.rst deleted file mode 100644 index d346b4c..0000000 --- a/docs/source/autoapi/hydrosignatures/hydrosignatures/index.rst +++ /dev/null @@ -1,224 +0,0 @@ -hydrosignatures.hydrosignatures -=============================== - -.. py:module:: hydrosignatures.hydrosignatures - -.. 
autoapi-nested-parse:: - - Function for computing hydrologic signature. - - - - - - - -Module Contents ---------------- - -.. py:class:: HydroSignatures - - Hydrological signatures. - - :Parameters: * **q_mmpt** (:class:`pandas.Series`) -- Discharge in mm per unit time (the same timescale as precipitation). - * **p_mmpt** (:class:`pandas.Series`) -- Precipitation in mm per unit time (the same timescale as discharge). - * **si_method** (:class:`str`, *optional*) -- Seasonality index method. Either ``walsh`` or ``markham``. Default is ``walsh``. - * **fdc_slope_bins** (:class:`tuple` of :class:`int`, *optional*) -- The percentage bins between 1-100 to compute the slope of FDC within it, - defaults to ``(33, 67)``. - * **bfi_alpha** (:class:`float`, *optional*) -- Alpha parameter for baseflow separation filter using Lyne and Hollick method. - Default is ``0.925``. - - - .. py:method:: bfi() - - Compute Baseflow Index. - - - - .. py:method:: diff(other) - - Compute absolute difference between two hydrological signatures. - - - - .. py:method:: fdc() - - Compute exceedance probability (for flow duration curve). - - - - .. py:method:: fdc_slope() - - Compute FDC slopes between a list of lower and upper percentiles. - - - - .. py:method:: isclose(other) - - Check if the signatures are close between with a tolerance of 1e-3. - - - - .. py:method:: mean_annual_flood() - - Compute mean annual flood. - - - - .. py:method:: mean_monthly() - - Compute mean monthly flow (for regime curve). - - - - .. py:method:: runoff_ratio() - - Compute total runoff ratio. - - - - .. py:method:: seasonality_index() - - Compute seasonality index. - - - - .. py:method:: streamflow_elasticity() - - Compute streamflow elasticity. - - - - .. py:method:: to_dict() - - Return a dictionary with the hydrological signatures. - - - - .. py:method:: to_json() - - Return a JSON string with the hydrological signatures. - - - - .. 
py:property:: signature_names - :type: dict[str, str] - - - Return a dictionary with the hydrological signatures. - - - .. py:property:: values - :type: SignaturesFloat - - - Return a dictionary with the hydrological signatures. - - -.. py:function:: aridity_index(pet: pandas.Series, prcp: pandas.Series) -> numpy.float64 - aridity_index(pet: pandas.DataFrame, prcp: pandas.DataFrame) -> pandas.Series - aridity_index(pet: xarray.DataArray, prcp: xarray.DataArray) -> xarray.DataArray - - Compute (Budyko) aridity index (PET/Prcp). - - :Parameters: * **pet** (:class:`pandas.DataFrame` or :class:`pandas.Series` or :class:`xarray.DataArray`) -- Potential evapotranspiration time series. Each column can - correspond to PET a different location. Note that ``pet`` and ``prcp`` - must have the same shape. - * **prcp** (:class:`pandas.DataFrame` or :class:`pandas.Series` or :class:`xarray.DataArray`) -- Precipitation time series. Each column can - correspond to PET a different location. Note that ``pet`` and ``prcp`` - must have the same shape. - - :returns: :class:`float` or :class:`pandas.Series` or :class:`xarray.DataArray` -- The aridity index. - - -.. py:function:: exceedance(daily, threshold = 0.001) - - Compute exceedance probability from daily data. - - :Parameters: * **daily** (:class:`pandas.Series` or :class:`pandas.DataFrame`) -- The data to be processed - * **threshold** (:class:`float`, *optional*) -- The threshold to compute exceedance probability, defaults to 1e-3. - - :returns: :class:`pandas.Series` or :class:`pandas.DataFrame` -- Exceedance probability. - - -.. py:function:: extract_extrema(ts, var_name, n_pts) - - Get local extrema in a time series. - - :Parameters: * **ts** (:class:`pandas.Series`) -- Variable time series. - * **var_name** (:class:`str`) -- Variable name. - * **n_pts** (:class:`int`) -- Number of points to consider for detecting local extrema on both - sides of each point. 
- - :returns: :class:`pandas.DataFrame` -- A dataframe with three columns: ``var_name``, ``peak`` (bool) - and ``trough`` (bool). - - -.. py:function:: flashiness_index(daily) - - Compute flashiness index from daily data following Baker et al. (2004). - - :Parameters: **daily** (:class:`pandas.Series` or :class:`pandas.DataFrame` or :class:`numpy.ndarray` or :class:`xarray.DataArray`) -- The data to be processed - - :returns: :class:`numpy.ndarray` -- Flashiness index. - - .. rubric:: References - - Baker, D.B., Richards, R.P., Loftus, T.T. and Kramer, J.W., 2004. A new - flashiness index: Characteristics and applications to midwestern rivers - and streams 1. JAWRA Journal of the American Water Resources - Association, 40(2), pp.503-522. - - -.. py:function:: flood_moments(streamflow) - - Compute flood moments (MAF, CV, CS) from streamflow. - - :Parameters: **streamflow** (:class:`pandas.DataFrame`) -- The streamflow data to be processed - - :returns: :class:`pandas.DataFrame` -- Flood moments; mean annual flood (MAF), coefficient - of variation (CV), and coefficient of skewness (CS). - - -.. py:function:: flow_duration_curve_slope(discharge, bins, log) - - Compute FDC slopes between the given lower and upper percentiles. - - :Parameters: * **discharge** (:class:`pandas.Series` or :class:`pandas.DataFrame` or :class:`numpy.ndarray` or :class:`xarray.DataArray`) -- The discharge data to be processed. - * **bins** (:class:`tuple` of :class:`int`) -- Percentile bins for computing FDC slopes between., e.g., (33, 67) - returns the slope between the 33rd and 67th percentiles. - * **log** (:class:`bool`) -- Whether to use log-transformed data. - - :returns: :class:`numpy.ndarray` -- The slopes between the given percentiles. - - -.. py:function:: mean_monthly(daily, index_abbr = False, cms = False) - - Compute mean monthly summary from daily data. 
- - :Parameters: * **daily** (:class:`pandas.Series` or :class:`pandas.DataFrame`) -- The data to be processed - * **index_abbr** (:class:`bool`, *optional*) -- Whether to use abbreviated month names as index instead of - numbers, defaults to False. - * **cms** (:class:`bool`, *optional*) -- Whether the input data is in cubic meters per second (cms), - defaults to False. If True, the mean monthly summary will be - computed by taking the mean of the daily data, otherwise the - sum of the daily data will be used. - - :returns: :class:`pandas.Series` or :class:`pandas.DataFrame` -- Mean monthly summary. - - -.. py:function:: rolling_mean_monthly(daily) - - Compute rolling mean monthly. - - -.. py:function:: seasonality_index_markham(data) - - Compute seasonality index based on Markham, 1970. - - -.. py:function:: seasonality_index_walsh(data) - - Compute seasonality index based on Walsh and Lawler, 1981 method. - - diff --git a/docs/source/autoapi/hydrosignatures/index.rst b/docs/source/autoapi/hydrosignatures/index.rst deleted file mode 100644 index c361591..0000000 --- a/docs/source/autoapi/hydrosignatures/index.rst +++ /dev/null @@ -1,21 +0,0 @@ -hydrosignatures -=============== - -.. py:module:: hydrosignatures - -.. autoapi-nested-parse:: - - Top-level package for HydroSignatures. - - - -Submodules ----------- - -.. toctree:: - :maxdepth: 1 - - /autoapi/hydrosignatures/baseflow/index - /autoapi/hydrosignatures/hydrosignatures/index - - diff --git a/docs/source/autoapi/index.rst b/docs/source/autoapi/index.rst deleted file mode 100644 index ba50f7b..0000000 --- a/docs/source/autoapi/index.rst +++ /dev/null @@ -1,19 +0,0 @@ -API References -============== - -.. include:: ../versions.txt - -.. 
toctree:: - :titlesonly: - :hidden: - - PyNHD - PyGeoHydro - Py3DEP - PyDaymet - PyGridMET - PyNLDAS2 - HydroSignatures - AsyncRetriever - PyGeoOGC - PyGeoUtils diff --git a/docs/source/autoapi/py3dep/geoops/index.rst b/docs/source/autoapi/py3dep/geoops/index.rst deleted file mode 100644 index 84fa60d..0000000 --- a/docs/source/autoapi/py3dep/geoops/index.rst +++ /dev/null @@ -1,63 +0,0 @@ -py3dep.geoops -============= - -.. py:module:: py3dep.geoops - -.. autoapi-nested-parse:: - - Utilities for Py3DEP. - - - - - -Module Contents ---------------- - -.. py:function:: deg2mpm(slope) - - Convert slope from degrees to meter/meter. - - :Parameters: **slope** (:class:`xarray.DataArray`) -- Slope in degrees. - - :returns: :class:`xarray.DataArray` -- Slope in meter/meter. The name is set to ``slope`` and the ``units`` attribute - is set to ``m/m``. - - -.. py:function:: fill_depressions(elevtn, outlets = 'min', idxs_pit = None, nodata = np.nan, max_depth = -1.0, elv_max = None, connectivity = 8) - - Fill local depressions in elevation data based on Wang and Liu (2006). - - .. note:: - - This function is based on the ``fill_depressions`` function from the - `pyflwdir `__ package. This function - improves the performance of the original function by a factor of up to 2 and - adds more input checks. Additionally, it works with ``xarray.DataArray`` objects. - - Outlets are assumed to occur at the edge of valid elevation cells ``outlets='edge'``; - at the lowest valid edge cell to create one single outlet ``outlets='min'``; - or at user provided outlet cells ``idxs_pit``. - - Depressions elsewhere are filled based on its lowest pour point elevation. - If the pour point depth is larger than the maximum pour point depth ``max_depth`` - a pit is set at the depression local minimum elevation. - - Wang, L., & Liu, H. (2006). 
https://doi.org/10.1080/13658810500433453 - - :Parameters: * **elevtn** (:class:`numpy.ndarray` or :class:`xarray.DataArray`) -- elevation raster as a 2D ``numpy.ndarray`` or ``xarray.DataArray``. - * **outlets** (``{"edge", "min}``, *optional*) -- Initial basin outlet(s) at the edge of all cells ('edge') - or only the minimum elevation edge cell ('min'; default) - * **idxs_pit** (:class:`1D array` of :class:`int`, *optional*) -- Linear indices of outlet cells, in any, defaults to None. - * **nodata** (:class:`float`, *optional*) -- nodata value, defaults to ``numpy.nan``. - * **max_depth** (:class:`float`, *optional*) -- Maximum pour point depth. Depressions with a larger pour point - depth are set as pit. A negative value (default) equals an infitely - large pour point depth causing all depressions to be filled. - Defaults to -1.0. - * **elv_max, float, optional** -- Maximum elevation for outlets, only in combination with ``outlets='edge'``. - By default ``None``. - * **connectivity** (``{4, 8}``, *optional*) -- Number of neighboring cells to consider, defaults to 8. - - :returns: **elevtn_out** (:class:`numpy.ndarray`) -- Depression filled elevation with type float32. - - diff --git a/docs/source/autoapi/py3dep/index.rst b/docs/source/autoapi/py3dep/index.rst deleted file mode 100644 index f9bfef9..0000000 --- a/docs/source/autoapi/py3dep/index.rst +++ /dev/null @@ -1,21 +0,0 @@ -py3dep -====== - -.. py:module:: py3dep - -.. autoapi-nested-parse:: - - Top-level package for Py3DEP. - - - -Submodules ----------- - -.. toctree:: - :maxdepth: 1 - - /autoapi/py3dep/geoops/index - /autoapi/py3dep/py3dep/index - - diff --git a/docs/source/autoapi/py3dep/py3dep/index.rst b/docs/source/autoapi/py3dep/py3dep/index.rst deleted file mode 100644 index cf69c51..0000000 --- a/docs/source/autoapi/py3dep/py3dep/index.rst +++ /dev/null @@ -1,230 +0,0 @@ -py3dep.py3dep -============= - -.. py:module:: py3dep.py3dep - -.. autoapi-nested-parse:: - - Get data from 3DEP database. 
- - - - - -Module Contents ---------------- - -.. py:function:: add_elevation(ds, resolution = None, x_dim = 'x', y_dim = 'y', mask = None) - - Add elevation data to a dataset as a new variable. - - :Parameters: * **ds** (:class:`xarray.DataArray` or :class:`xarray.Dataset`) -- The dataset to add elevation data to. It must contain - CRS information. - * **resolution** (:class:`float`, *optional*) -- Target DEM source resolution in meters, defaults ``None``, i.e., - the resolution of the input ``ds`` will be used. - * **x_dim** (:class:`str`, *optional*) -- Name of the x-coordinate dimension in ``ds``, defaults to ``x``. - * **y_dim** (:class:`str`, *optional*) -- Name of the y-coordinate dimension in ``ds``, defaults to ``y``. - * **mask** (:class:`xarray.DataArray`, *optional*) -- A mask to apply to the elevation data, defaults to ``None``. - - :returns: :class:`xarray.Dataset` -- The dataset with ``elevation`` variable added. - - -.. py:function:: check_3dep_availability(bbox, crs = 4326) - - Query 3DEP's resolution availability within a bounding box. - - This function checks availability of 3DEP's at the following resolutions: - 1 m, 3 m, 5 m, 10 m, 30 m, 60 m, and topobathy (integrated topobathymetry). - - :Parameters: * **bbox** (:class:`tuple`) -- Bounding box as tuple of ``(min_x, min_y, max_x, max_y)``. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS` or :class:`pyproj.CRS`, *optional*) -- Spatial reference (CRS) of ``bbox``, defaults to ``EPSG:4326``. - - :returns: :class:`dict` -- ``True`` if bbox intersects 3DEP elevation for each available resolution. - Keys are the supported resolutions and values are their availability. - If the query fails due to any reason, the value will be ``Failed``. - If necessary, you can try again later until there is no ``Failed`` value. - - .. 
rubric:: Examples - - >>> import py3dep - >>> bbox = (-69.77, 45.07, -69.31, 45.45) - >>> py3dep.check_3dep_availability(bbox) - {'1m': True, '3m': False, '5m': False, '10m': True, '30m': True, '60m': False, 'topobathy': False} - - -.. py:function:: elevation_bycoords(coords: tuple[float, float], crs: CRSType = ..., source: Literal['tep', 'tnm'] = ...) -> float - elevation_bycoords(coords: list[tuple[float, float]], crs: CRSType = ..., source: Literal['tep', 'tnm'] = ...) -> list[float] - - Get elevation for a list of coordinates. - - :Parameters: * **coords** (:class:`tuple` or :class:`list` of :class:`tuple`) -- Coordinates of target location(s), e.g., ``[(x, y), ...]``. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS` or :class:`pyproj.CRS`, *optional*) -- Spatial reference (CRS) of coords, defaults to ``EPSG:4326``. - * **source** (:class:`str`, *optional*) -- Data source to be used, default to ``tep``. Supported sources are - ``tnm`` (using The National Map's Bulk Point - Query Service with 10 m resolution) and ``tep`` (using 3DEP's static DEM VRTs - at 10 m resolution). The ``tnm`` and ``tep`` sources are more accurate since they - use the 1/3 arc-second DEM layer from 3DEP service but it is limited to the US. - Note that ``tnm`` is bit unstable. It's recommended to use ``tep`` unless 10-m - resolution accuracy is not necessary. - - :returns: :class:`float` or :class:`list` of :class:`float` -- Elevation in meter. - - -.. py:function:: elevation_bygrid(xcoords, ycoords, crs, resolution, depression_filling = False) - - Get elevation from DEM data for a grid. - - This function is intended for getting elevations for a gridded dataset. - - :Parameters: * **xcoords** (:class:`list`) -- List of x-coordinates of a grid. - * **ycoords** (:class:`list`) -- List of y-coordinates of a grid. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS` or :class:`pyproj.CRS`) -- The spatial reference system of the input grid, - defaults to ``EPSG:4326``. 
- * **resolution** (:class:`int`) -- The accuracy of the output, defaults to 10 m which is the highest - available resolution that covers CONUS. Note that higher resolution - increases computation time so choose this value with caution. - * **depression_filling** (:class:`bool`, *optional*) -- Fill depressions before sampling using - `Wang and Liu (2006) `__ - method, defaults to ``False``. - - :returns: :class:`xarray.DataArray` -- Elevations of the input coordinates as a ``xarray.DataArray``. - - -.. py:function:: elevation_profile(lines, spacing, crs = 4326) - - Get the elevation profile along a line at a given uniform spacing. - - .. note:: - - This function converts the line to a spline and then calculates the elevation - along the spline at a given uniform spacing using 10-m resolution DEM from 3DEP. - - :Parameters: * **lines** (:class:`LineString` or :class:`MultiLineString`) -- Line segment(s) to be profiled. If its type is ``MultiLineString``, - it will be converted to a single ``LineString`` and if this operation - fails, an ``InputTypeError`` will be raised. - * **spacing** (:class:`float`) -- Spacing between the sample points along the line in meters. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- Spatial reference System (CRS) of ``lines``, defaults to ``EPSG:4326``. - - :returns: :class:`xarray.DataArray` -- Elevation profile with dimension ``z`` and three coordinates: ``x``, ``y``, - and ``distance``. The ``distance`` coordinate is the distance from the start - of the line in meters. - - -.. py:function:: get_dem(geometry, resolution, crs = 4326) - - Get DEM data at any resolution from 3DEP. - - .. rubric:: Notes - - This function is a wrapper of ``static_3dep_dem`` and ``get_map`` functions. - Since ``static_3dep_dem`` is much faster, if the requested resolution is 10 m, - 30 m, or 60 m, ``static_3dep_dem`` will be used. Otherwise, ``get_map`` - will be used. 
- - :Parameters: * **geometry** (:class:`Polygon`, :class:`MultiPolygon`, or :class:`tuple` of :class:`length 4`) -- Geometry to get DEM within. It can be a polygon or a bounding box - of form (xmin, ymin, xmax, ymax). - * **resolution** (:class:`int`) -- Target DEM source resolution in meters. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The spatial reference system of the input geometry, defaults to ``EPSG:4326``. - - :returns: :class:`xarray.DataArray` -- DEM at the specified resolution in meters and 5070 CRS. - - -.. py:function:: get_dem_vrt(bbox, resolution, vrt_path, tiff_dir = 'cache', crs = 4326) - - Get DEM data at any resolution from 3DEP and save it as a VRT file. - - :Parameters: * **bbox** (:class:`tuple` of :class:`length 4`) -- The bounding box of form (xmin, ymin, xmax, ymax). - * **resolution** (:class:`int`) -- Target DEM source resolution in meters. - * **vrt_path** (:class:`str` or :class:`pathlib.Path`) -- Path to the output VRT file. - * **tiff_dir** (:class:`str` or :class:`pathlib.Path`, *optional*) -- Path to the directory to save the downloaded TIFF file, defaults - to ``./cache``. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The spatial reference system of ``bbox``, defaults to ``EPSG:4326``. - - -.. py:function:: get_map(layers: str, geometry: shapely.Polygon | shapely.MultiPolygon | tuple[float, float, float, float], resolution: int, geo_crs: CRSType = ..., crs: CRSType = ...) -> xarray.DataArray - get_map(layers: list[str], geometry: shapely.Polygon | shapely.MultiPolygon | tuple[float, float, float, float], resolution: int, geo_crs: CRSType = ..., crs: CRSType = ...) -> xarray.Dataset - - Access dynamic layer of `3DEP `__. - - The 3DEP service has multi-resolution sources, so depending on the user - provided resolution the data is resampled on server-side based - on all the available data sources. 
The following layers are available: - - - ``DEM`` - - ``Hillshade Gray`` - - ``Aspect Degrees`` - - ``Aspect Map`` - - ``GreyHillshade_elevationFill`` - - ``Hillshade Multidirectional`` - - ``Slope Map`` - - ``Slope Degrees`` - - ``Hillshade Elevation Tinted`` - - ``Height Ellipsoidal`` - - ``Contour 25`` - - ``Contour Smoothed 25`` - - :Parameters: * **layers** (:class:`str` or :class:`list` of :class:`str`) -- A valid 3DEP layer or a list of them. - * **geometry** (:class:`Polygon`, :class:`MultiPolygon`, or :class:`tuple`) -- A shapely Polygon or a bounding box of the form ``(west, south, east, north)``. - * **resolution** (:class:`int`) -- The target resolution in meters. The width and height of the output are computed in - pixels based on the geometry bounds and the given resolution. - * **geo_crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The spatial reference system of the input geometry, defaults to 4326. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The spatial reference system to be used for requesting the data, - defaults to 5070. Valid values are 4326, 3857, 3338, 3571, 3572, - 3573, 3574, 3575, 3576, and 5070. Note that at the moment due to - an issue on the server-side (USGS's 3DEP web service), when passing - 4326, the server returns invalid data. So it's recommended to use - 5070 for the time being. - - :returns: :class:`xarray.DataArray` or :class:`xarray.Dataset` -- The requested topographic data as an ``xarray.DataArray`` or ``xarray.Dataset``. - - -.. py:function:: query_3dep_sources(bbox, crs = 4326, res = None) - - Query 3DEP's data sources within a bounding box. - - This function queries the availability of the underlying data that 3DEP uses - at the following resolutions: - 1 m, 3 m, 5 m, 10 m, 30 m, 60 m, and topobathy (integrated topobathymetry). - - :Parameters: * **bbox** (:class:`tuple`) -- Bounding box as tuple of ``(min_x, min_y, max_x, max_y)``. 
- * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- Spatial reference (CRS) of bbox, defaults to ``EPSG:4326``. - * **res** (:class:`str`, :class:`list` of :class:`str`, *optional*) -- Resolution to query, defaults to ``None``, i.e., all resolutions. - Available resolutions are: ``1m``, ``3m``, ``5m``, ``10m``, ``30m``, - ``60m``, and ``topobathy``. - - :returns: :class:`geopandas.GeoDataFrame` -- Polygon(s) representing the 3DEP data sources at each resolution. - Resolutions are given in the ``dem_res`` column. - - .. rubric:: Examples - - >>> import py3dep - >>> bbox = (-69.77, 45.07, -69.31, 45.45) - >>> src = py3dep.query_3dep_sources(bbox) - >>> src.groupby("dem_res")["OBJECTID"].count().to_dict() - {'10m': 16, '1m': 4, '30m': 8} - >>> src = py3dep.query_3dep_sources(bbox, res="1m") - >>> src.groupby("dem_res")["OBJECTID"].count().to_dict() - {'1m': 4} - - -.. py:function:: static_3dep_dem(geometry, crs, resolution = 10) - - Get DEM data at specific resolution from 3DEP. - - .. rubric:: Notes - - In contrast to ``get_map`` function, this function only gets DEM data at - specific resolution, namely 10 m, 30 m, and 60 m. However, this function - is faster. This function is intended for cases where only DEM at a - specific resolution is required and for the other requests ``get_map`` - should be used. - - :Parameters: * **geometry** (:class:`Polygon`, :class:`MultiPolygon`, or :class:`tuple` of :class:`length 4`) -- Geometry to get DEM within. It can be a polygon or a bounding box - of form (xmin, ymin, xmax, ymax). - * **crs** (:class:`int`, :class:`str`, or :class:`pyproj.CRS`) -- CRS of the input geometry. - * **resolution** (:class:`int`, *optional*) -- Target DEM source resolution in meters, defaults to 10 m which is the highest - resolution available over the US. Available options are 10, 30, and 60. - - :returns: :class:`xarray.DataArray` -- The requested DEM at the specified resolution. 
- - diff --git a/docs/source/autoapi/pydaymet/core/index.rst b/docs/source/autoapi/pydaymet/core/index.rst deleted file mode 100644 index 4a2f8a2..0000000 --- a/docs/source/autoapi/pydaymet/core/index.rst +++ /dev/null @@ -1,110 +0,0 @@ -pydaymet.core -============= - -.. py:module:: pydaymet.core - -.. autoapi-nested-parse:: - - Core class for the Daymet functions. - - - - - - - -Module Contents ---------------- - -.. py:class:: Daymet(variables = None, pet = None, snow = False, time_scale = 'daily', region = 'na') - - Base class for Daymet requests. - - :Parameters: * **variables** (:class:`str` or :class:`list` or :class:`tuple`, *optional*) -- List of variables to be downloaded. The acceptable variables are: - ``tmin``, ``tmax``, ``prcp``, ``srad``, ``vp``, ``swe``, ``dayl`` - Descriptions can be found `here `__. - Defaults to None i.e., all the variables are downloaded. - * **pet** (:class:`str`, *optional*) -- Method for computing PET. Supported methods are - ``penman_monteith``, ``priestley_taylor``, ``hargreaves_samani``, and - None (don't compute PET). The ``penman_monteith`` method is based on - :footcite:t:`Allen_1998` assuming that soil heat flux density is zero. - The ``priestley_taylor`` method is based on - :footcite:t:`Priestley_1972` assuming that soil heat flux density is zero. - The ``hargreaves_samani`` method is based on :footcite:t:`Hargreaves_1982`. - Defaults to ``None``. - * **snow** (:class:`bool`, *optional*) -- Compute snowfall from precipitation and minimum temperature. Defaults to ``False``. - * **time_scale** (:class:`str`, *optional*) -- Data time scale which can be daily, monthly (monthly summaries), - or annual (annual summaries). Defaults to daily. - * **region** (:class:`str`, *optional*) -- Region in the US, defaults to na. Acceptable values are: - - * na: Continental North America - * hi: Hawaii - * pr: Puerto Rico - - .. rubric:: References - - .. footbibliography:: - - - .. 
py:method:: check_dates(dates) - :staticmethod: - - - Check if input dates are in correct format and valid. - - - - .. py:method:: dates_todict(dates) - - Set dates by start and end dates as a tuple, (start, end). - - - - .. py:method:: dates_tolist(dates) - - Correct dates for Daymet accounting for leap years. - - Daymet doesn't account for leap years and removes Dec 31 when - it's leap year. - - :Parameters: **dates** (:class:`tuple`) -- Target start and end dates. - - :returns: :class:`list` -- All the dates in the Daymet database within the provided date range. - - - - .. py:method:: years_todict(years) - - Set date by list of year(s). - - - - .. py:method:: years_tolist(years) - - Correct dates for Daymet accounting for leap years. - - Daymet doesn't account for leap years and removes Dec 31 when - it's leap year. - - :Parameters: **years** (:class:`list`) -- A list of target years. - - :returns: :class:`list` -- All the dates in the Daymet database within the provided date range. - - - -.. py:function:: separate_snow(clm, t_rain = T_RAIN, t_snow = T_SNOW) - - Separate snow based on :footcite:t:`Martinez_2010`. - - :Parameters: * **clm** (:class:`pandas.DataFrame` or :class:`xarray.Dataset`) -- Climate data that should include ``prcp`` and ``tmin``. - * **t_rain** (:class:`float`, *optional*) -- Threshold for temperature for considering rain, defaults to 2.5 degrees C. - * **t_snow** (:class:`float`, *optional*) -- Threshold for temperature for considering snow, defaults to 0.6 degrees C. - - :returns: :class:`pandas.DataFrame` or :class:`xarray.Dataset` -- Input data with ``snow (mm/day)`` column if input is a ``pandas.DataFrame``, - or ``snow`` variable if input is an ``xarray.Dataset``. - - .. rubric:: References - - .. 
footbibliography:: - - diff --git a/docs/source/autoapi/pydaymet/index.rst b/docs/source/autoapi/pydaymet/index.rst deleted file mode 100644 index e7d8df0..0000000 --- a/docs/source/autoapi/pydaymet/index.rst +++ /dev/null @@ -1,22 +0,0 @@ -pydaymet -======== - -.. py:module:: pydaymet - -.. autoapi-nested-parse:: - - Top-level package for PyDaymet. - - - -Submodules ----------- - -.. toctree:: - :maxdepth: 1 - - /autoapi/pydaymet/core/index - /autoapi/pydaymet/pet/index - /autoapi/pydaymet/pydaymet/index - - diff --git a/docs/source/autoapi/pydaymet/pet/index.rst b/docs/source/autoapi/pydaymet/pet/index.rst deleted file mode 100644 index 4a53a13..0000000 --- a/docs/source/autoapi/pydaymet/pet/index.rst +++ /dev/null @@ -1,82 +0,0 @@ -pydaymet.pet -============ - -.. py:module:: pydaymet.pet - -.. autoapi-nested-parse:: - - Core class for the Daymet functions. - - - - - -Module Contents ---------------- - -.. py:function:: potential_et(clm: pandas.DataFrame, coords: tuple[float, float], crs: CRSType, method: Literal['penman_monteith', 'priestley_taylor', 'hargreaves_samani'] = ..., params: dict[str, float] | None = ...) -> pandas.DataFrame - potential_et(clm: xarray.Dataset, coords: None = None, crs: None = None, method: Literal['penman_monteith', 'priestley_taylor', 'hargreaves_samani'] = ..., params: dict[str, float] | None = ...) -> xarray.Dataset - - Compute Potential EvapoTranspiration for both gridded and a single location. - - :Parameters: * **clm** (:class:`pandas.DataFrame` or :class:`xarray.Dataset`) -- The dataset must include at least the following variables: - - * Minimum temperature in degree celsius - * Maximum temperature in degree celsius - * Solar radiation in in W/m2 - * Daylight duration in seconds - - Optionally, for ``penman_monteith``, wind speed at 2-m level - will be used if available, otherwise, default value of 2 m/s - will be assumed. Table below shows the variable names - that the function looks for in the input data. 
- - ==================== ================== - ``pandas.DataFrame`` ``xarray.Dataset`` - ==================== ================== - ``tmin (degrees C)`` ``tmin`` - ``tmax (degrees C)`` ``tmax`` - ``srad (W/m2)`` ``srad`` - ``dayl (s)`` ``dayl`` - ``u2m (m/s)`` ``u2m`` - ==================== ================== - * **coords** (:class:`tuple` of :class:`floats`, *optional*) -- Coordinates of the daymet data location as a tuple, (x, y). This is required when ``clm`` - is a ``DataFrame``. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The spatial reference of the input coordinate, defaults to ``EPSG:4326``. This is only used - when ``clm`` is a ``DataFrame``. - * **method** (:class:`str`, *optional*) -- Method for computing PET. Supported methods are - ``penman_monteith``, ``priestley_taylor``, and ``hargreaves_samani``. - The ``penman_monteith`` method is based on - :footcite:t:`Allen_1998` assuming that soil heat flux density is zero. - The ``priestley_taylor`` method is based on - :footcite:t:`Priestley_1972` assuming that soil heat flux density is zero. - The ``hargreaves_samani`` method is based on :footcite:t:`Hargreaves_1982`. - Defaults to ``hargreaves_samani``. - * **params** (:class:`dict`, *optional*) -- Model-specific parameters as a dictionary, defaults to ``None``. Valid - parameters are: - - * ``penman_monteith``: ``soil_heat_flux``, ``albedo``, ``alpha``, - and ``arid_correction``. - * ``priestley_taylor``: ``soil_heat_flux``, ``albedo``, and ``arid_correction``. - * ``hargreaves_samani``: None. - - Default values for the parameters are: ``soil_heat_flux`` = 0, ``albedo`` = 0.23, - ``alpha`` = 1.26, and ``arid_correction`` = False. - An important parameter for ``priestley_taylor`` and ``penman_monteith`` methods - is ``arid_correction`` which is used to correct the actual vapor pressure - for arid regions. 
Since relative humidity is not provided by Daymet, the actual - vapor pressure is computed assuming that the dewpoint temperature is equal to - the minimum temperature. However, for arid regions, FAO 56 suggests subtracting - minimum temperature by 2-3 °C to account for the fact that in arid regions, - the air might not be saturated when its temperature is at its minimum. For such - areas, you can pass ``{"arid_correction": True, ...}`` to subtract 2 °C from the - minimum temperature for computing the actual vapor pressure. - - :returns: :class:`pandas.DataFrame` or :class:`xarray.Dataset` -- The input DataFrame/Dataset with an additional variable named ``pet (mm/day)`` for - ``pandas.DataFrame`` and ``pet`` for ``xarray.Dataset``. - - .. rubric:: References - - .. footbibliography:: - - diff --git a/docs/source/autoapi/pydaymet/pydaymet/index.rst b/docs/source/autoapi/pydaymet/pydaymet/index.rst deleted file mode 100644 index 32df4be..0000000 --- a/docs/source/autoapi/pydaymet/pydaymet/index.rst +++ /dev/null @@ -1,252 +0,0 @@ -pydaymet.pydaymet -================= - -.. py:module:: pydaymet.pydaymet - -.. autoapi-nested-parse:: - - Access the Daymet database for both single single pixel and gridded queries. - - - - - -Module Contents ---------------- - -.. py:function:: get_bycoords(coords, dates, coords_id = None, crs = 4326, variables = None, region = 'na', time_scale = 'daily', pet = None, pet_params = None, snow = False, snow_params = None, to_xarray = False) - - Get point-data from the Daymet database at 1-km resolution. - - This function uses THREDDS data service to get the coordinates - and supports getting monthly and annual summaries of the climate - data directly from the server. 
- - :Parameters: * **coords** (:class:`tuple` or :class:`list` of :class:`tuples`) -- Coordinates of the location(s) of interest as a tuple (x, y) - * **dates** (:class:`tuple` or :class:`list`) -- Start and end dates as a tuple (start, end) or a list of years ``[2001, 2010, ...]``. - * **coords_id** (:class:`list` of :class:`int` or :class:`str`, *optional*) -- A list of identifiers for the coordinates. This option only applies when ``to_xarray`` - is set to ``True``. If not provided, the coordinates will be enumerated. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The CRS of the input coordinates, defaults to ``EPSG:4326``. - * **variables** (:class:`str` or :class:`list`) -- List of variables to be downloaded. The acceptable variables are: - ``tmin``, ``tmax``, ``prcp``, ``srad``, ``vp``, ``swe``, ``dayl`` - Descriptions can be found `here `__. - * **region** (:class:`str`, *optional*) -- Target region in the US, defaults to ``na``. Acceptable values are: - - * ``na``: Continental North America - * ``hi``: Hawaii - * ``pr``: Puerto Rico - * **time_scale** (:class:`str`, *optional*) -- Data time scale which can be ``daily``, ``monthly`` (monthly summaries), - or ``annual`` (annual summaries). Defaults to ``daily``. - * **pet** (:class:`str`, *optional*) -- Method for computing PET. Supported methods are - ``penman_monteith``, ``priestley_taylor``, ``hargreaves_samani``, and - None (don't compute PET). The ``penman_monteith`` method is based on - :footcite:t:`Allen_1998` assuming that soil heat flux density is zero. - The ``priestley_taylor`` method is based on - :footcite:t:`Priestley_1972` assuming that soil heat flux density is zero. - The ``hargreaves_samani`` method is based on :footcite:t:`Hargreaves_1982`. - Defaults to ``None``. - * **pet_params** (:class:`dict`, *optional*) -- Model-specific parameters as a dictionary, defaults to ``None``. 
An important - parameter for ``priestley_taylor`` and ``penman_monteith`` methods is - ``arid_correction`` which is used to correct the actual vapor pressure - for arid regions. Since relative humidity is not provided by Daymet, the actual - vapor pressure is computed assuming that the dewpoint temperature is equal to - the minimum temperature. However, for arid regions, FAO 56 suggests subtracting - the minimum temperature by 2-3 °C to account for aridity, since in arid regions, - the air might not be saturated when its temperature is at its minimum. For such - areas, you can pass ``{"arid_correction": True, ...}`` to subtract 2 °C from the - minimum temperature before computing the actual vapor pressure. - * **snow** (:class:`bool`, *optional*) -- Compute snowfall from precipitation and minimum temperature. Defaults to ``False``. - * **snow_params** (:class:`dict`, *optional*) -- Model-specific parameters as a dictionary that is passed to the snowfall function. - These parameters are only used if ``snow`` is ``True``. Two parameters are required: - ``t_rain`` (deg C) which is the threshold for temperature for considering rain and - ``t_snow`` (deg C) which is the threshold for temperature for considering snow. - The default values are ``{'t_rain': 2.5, 't_snow': 0.6}`` that are adopted from - https://doi.org/10.5194/gmd-11-1077-2018. - * **to_xarray** (:class:`bool`, *optional*) -- Return the data as an ``xarray.Dataset``. Defaults to ``False``. - - :returns: :class:`pandas.DataFrame` or :class:`xarray.Dataset` -- Daily climate data for a single or list of locations. - - .. rubric:: Examples - - >>> import pydaymet as daymet - >>> coords = (-1431147.7928, 318483.4618) - >>> dates = ("2000-01-01", "2000-12-31") - >>> clm = daymet.get_bycoords( - ... coords, - ... dates, - ... crs=3542, - ... pet="hargreaves_samani", - ... ) - >>> clm["pet (mm/day)"].mean() - 3.713 - - .. rubric:: References - - .. footbibliography:: - - -.. 
py:function:: get_bygeom(geometry, dates, crs = 4326, variables = None, region = 'na', time_scale = 'daily', pet = None, pet_params = None, snow = False, snow_params = None) - - Get gridded data from the Daymet database at 1-km resolution. - - :Parameters: * **geometry** (:class:`Polygon` or :class:`tuple`) -- The geometry of the region of interest. It can be a shapely Polygon or a tuple - of length 4 representing the bounding box (minx, miny, maxx, maxy). - * **dates** (:class:`tuple` or :class:`list`) -- Start and end dates as a tuple (start, end) or a list of years [2001, 2010, ...]. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The CRS of the input geometry, defaults to epsg:4326. - * **variables** (:class:`str` or :class:`list`) -- List of variables to be downloaded. The acceptable variables are: - ``tmin``, ``tmax``, ``prcp``, ``srad``, ``vp``, ``swe``, ``dayl`` - Descriptions can be found `here `__. - * **region** (:class:`str`, *optional*) -- Region in the US, defaults to na. Acceptable values are: - - * na: Continental North America - * hi: Hawaii - * pr: Puerto Rico - * **time_scale** (:class:`str`, *optional*) -- Data time scale which can be daily, monthly (monthly average), - or annual (annual average). Defaults to daily. - * **pet** (:class:`str`, *optional*) -- Method for computing PET. Supported methods are - ``penman_monteith``, ``priestley_taylor``, ``hargreaves_samani``, and - None (don't compute PET). The ``penman_monteith`` method is based on - :footcite:t:`Allen_1998` assuming that soil heat flux density is zero. - The ``priestley_taylor`` method is based on - :footcite:t:`Priestley_1972` assuming that soil heat flux density is zero. - The ``hargreaves_samani`` method is based on :footcite:t:`Hargreaves_1982`. - Defaults to ``None``. - * **pet_params** (:class:`dict`, *optional*) -- Model-specific parameters as a dictionary, defaults to ``None``. 
Valid - parameters are: - - * ``penman_monteith``: ``soil_heat_flux``, ``albedo``, ``alpha``, - and ``arid_correction``. - * ``priestley_taylor``: ``soil_heat_flux``, ``albedo``, and ``arid_correction``. - * ``hargreaves_samani``: None. - - Default values for the parameters are: ``soil_heat_flux`` = 0, ``albedo`` = 0.23, - ``alpha`` = 1.26, and ``arid_correction`` = False. - An important parameter for ``priestley_taylor`` and ``penman_monteith`` methods - is ``arid_correction`` which is used to correct the actual vapor pressure - for arid regions. Since relative humidity is not provided by Daymet, the actual - vapor pressure is computed assuming that the dewpoint temperature is equal to - the minimum temperature. However, for arid regions, FAO 56 suggests subtracting - the minimum temperature by 2-3 °C to account for aridity, since in arid regions, - the air might not be saturated when its temperature is at its minimum. For such - areas, you can pass ``{"arid_correction": True, ...}`` to subtract 2 °C from the - minimum temperature before computing the actual vapor pressure. - * **snow** (:class:`bool`, *optional*) -- Compute snowfall from precipitation and minimum temperature. Defaults to ``False``. - * **snow_params** (:class:`dict`, *optional*) -- Model-specific parameters as a dictionary that is passed to the snowfall function. - These parameters are only used if ``snow`` is ``True``. Two parameters are required: - ``t_rain`` (deg C) which is the threshold for temperature for considering rain and - ``t_snow`` (deg C) which is the threshold for temperature for considering snow. - The default values are ``{'t_rain': 2.5, 't_snow': 0.6}`` that are adopted from - https://doi.org/10.5194/gmd-11-1077-2018. - - :returns: :class:`xarray.Dataset` -- Daily climate data within the target geometry. - - .. rubric:: Examples - - >>> from shapely import Polygon - >>> import pydaymet as daymet - >>> geometry = Polygon( - ... 
[[-69.77, 45.07], [-69.31, 45.07], [-69.31, 45.45], [-69.77, 45.45], [-69.77, 45.07]] - ... ) - >>> clm = daymet.get_bygeom(geometry, 2010, variables="tmin", time_scale="annual") - >>> clm["tmin"].mean().item() - 1.361 - - .. rubric:: References - - .. footbibliography:: - - -.. py:function:: get_bystac(geometry, dates, crs = 4326, variables = None, region = 'na', time_scale = 'daily', res_km = 1, pet = None, pet_params = None, snow = False, snow_params = None) - - Get gridded Daymet from STAC. - - .. versionadded:: 0.16.1 - .. note:: - This function provides access to the Daymet data from Microsoft's - the Planetary Computer: - https://planetarycomputer.microsoft.com/dataset/group/daymet. - Although this function can be much faster than :func:`get_bygeom`, - currently, it gives access to Daymet v4.2 from 1980 to 2020. For - accessing the latest version of Daymet (v4.5) you need to use - :func:`get_bygeom`. - - Also, this function requires ``fsspec``, ``dask``, ``zarr``, and - ``pystac-client`` packages. They can be installed using - ``pip install fsspec dask zarr pystac-client`` or - ``conda install fsspec dask-core zarr pystac-client``. - - :Parameters: * **geometry** (:class:`Polygon` or :class:`tuple`) -- The geometry of the region of interest. It can be a shapely Polygon or a tuple - of length 4 representing the bounding box (minx, miny, maxx, maxy). - * **dates** (:class:`tuple`) -- Start and end dates as a tuple (start, end) or a list of years [2001, 2010, ...]. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The CRS of the input geometry, defaults to epsg:4326. - * **variables** (:class:`str` or :class:`list`) -- List of variables to be downloaded. The acceptable variables are: - ``tmin``, ``tmax``, ``prcp``, ``srad``, ``vp``, ``swe``, ``dayl`` - Descriptions can be found `here `__. - * **region** (:class:`str`, *optional*) -- Region in the US, defaults to na. 
Acceptable values are: - - * na: Continental North America - * hi: Hawaii - * pr: Puerto Rico - * **time_scale** (:class:`str`, *optional*) -- Data time scale which can be daily, monthly (monthly average), - or annual (annual average). Defaults to daily. - * **res_km** (:class:`int`, *optional*) -- Spatial resolution in kilometers, defaults to 1. For values - greater than 1, the data will be aggregated (coarsened) using mean. - * **pet** (:class:`str`, *optional*) -- Method for computing PET. Supported methods are - ``penman_monteith``, ``priestley_taylor``, ``hargreaves_samani``, and - None (don't compute PET). The ``penman_monteith`` method is based on - :footcite:t:`Allen_1998` assuming that soil heat flux density is zero. - The ``priestley_taylor`` method is based on - :footcite:t:`Priestley_1972` assuming that soil heat flux density is zero. - The ``hargreaves_samani`` method is based on :footcite:t:`Hargreaves_1982`. - Defaults to ``None``. - * **pet_params** (:class:`dict`, *optional*) -- Model-specific parameters as a dictionary, defaults to ``None``. Valid - parameters are: - - * ``penman_monteith``: ``soil_heat_flux``, ``albedo``, ``alpha``, - and ``arid_correction``. - * ``priestley_taylor``: ``soil_heat_flux``, ``albedo``, and ``arid_correction``. - * ``hargreaves_samani``: None. - - Default values for the parameters are: ``soil_heat_flux`` = 0, ``albedo`` = 0.23, - ``alpha`` = 1.26, and ``arid_correction`` = False. - An important parameter for ``priestley_taylor`` and ``penman_monteith`` methods - is ``arid_correction`` which is used to correct the actual vapor pressure - for arid regions. Since relative humidity is not provided by Daymet, the actual - vapor pressure is computed assuming that the dewpoint temperature is equal to - the minimum temperature. 
However, for arid regions, FAO 56 suggests subtracting - the minimum temperature by 2-3 °C to account for aridity, since in arid regions, - the air might not be saturated when its temperature is at its minimum. For such - areas, you can pass ``{"arid_correction": True, ...}`` to subtract 2 °C from the - minimum temperature before computing the actual vapor pressure. - * **snow** (:class:`bool`, *optional*) -- Compute snowfall from precipitation and minimum temperature. Defaults to ``False``. - * **snow_params** (:class:`dict`, *optional*) -- Model-specific parameters as a dictionary that is passed to the snowfall function. - These parameters are only used if ``snow`` is ``True``. Two parameters are required: - ``t_rain`` (deg C) which is the threshold for temperature for considering rain and - ``t_snow`` (deg C) which is the threshold for temperature for considering snow. - The default values are ``{'t_rain': 2.5, 't_snow': 0.6}`` that are adopted from - https://doi.org/10.5194/gmd-11-1077-2018. - - :returns: :class:`xarray.Dataset` -- Daily climate data within the target geometry. - - .. rubric:: Examples - - >>> from shapely import Polygon - >>> geometry = Polygon( - ... [[-69.77, 45.07], [-69.70, 45.07], [-69.70, 45.15], [-69.77, 45.15], [-69.77, 45.07]] - ... ) - >>> clm = daymet.get_bystac( - ... geometry, - ... ("2010-01-01", "2010-01-02"), - ... variables="prcp", - ... res_km=4, - ... snow=True, - ... pet="hargreaves_samani", - ... ) - >>> clm["pet"].mean().item() - 0.3 - - .. rubric:: References - - .. footbibliography:: - - diff --git a/docs/source/autoapi/pygeohydro/helpers/index.rst b/docs/source/autoapi/pygeohydro/helpers/index.rst deleted file mode 100644 index 10f4771..0000000 --- a/docs/source/autoapi/pygeohydro/helpers/index.rst +++ /dev/null @@ -1,64 +0,0 @@ -pygeohydro.helpers -================== - -.. py:module:: pygeohydro.helpers - -.. autoapi-nested-parse:: - - Some helper function for PyGeoHydro. 
- - - - - -Module Contents ---------------- - -.. py:function:: get_us_states(subset_key = None) - - Get US states as a GeoDataFrame from Census' TIGERLine 2023 database. - - :Parameters: **subset_key** (:class:`str` or :class:`list` of :class:`str`, *optional*) -- Key to subset the geometries instead of returning all states, by default - all states are returned. Valid keys are: - - - ``contiguous`` or ``conus`` - - ``continental`` - - ``commonwealths`` - - ``territories`` - - Two letter state codes, e.g., ``["TX", "CA", "FL", ...]`` - - :returns: :class:`geopandas.GeoDataFrame` -- GeoDataFrame of requested US states. - - -.. py:function:: nlcd_helper() - - Get legends and properties of the NLCD cover dataset. - - .. rubric:: Notes - - The following references have been used: - - https://github.com/jzmiller1/nlcd - - https://www.mrlc.gov/data-services-page - - https://www.mrlc.gov/data/legends/national-land-cover-database-2016-nlcd2016-legend - - https://doi.org/10.1111/jfr3.12347 - - :returns: :class:`dict` -- Years when data is available and cover classes and categories, and roughness estimations. - - -.. py:function:: nwis_errors() - - Get error code lookup table for USGS sites that have daily values. - - -.. py:function:: states_lookup_table() - - Get codes and names of US states and their counties. - - .. rubric:: Notes - - This function is based on a file prepared by developers of - an R package called `dataRetrieval `__. - - :returns: :class:`pandas.DataFrame` -- State codes and name as a dataframe. - - diff --git a/docs/source/autoapi/pygeohydro/index.rst b/docs/source/autoapi/pygeohydro/index.rst deleted file mode 100644 index 55a9545..0000000 --- a/docs/source/autoapi/pygeohydro/index.rst +++ /dev/null @@ -1,31 +0,0 @@ -pygeohydro -========== - -.. py:module:: pygeohydro - -.. autoapi-nested-parse:: - - Top-level package for PyGeoHydro. - - - -Submodules ----------- - -.. 
toctree:: - :maxdepth: 1 - - /autoapi/pygeohydro/helpers/index - /autoapi/pygeohydro/levee/index - /autoapi/pygeohydro/nfhl/index - /autoapi/pygeohydro/nid/index - /autoapi/pygeohydro/nlcd/index - /autoapi/pygeohydro/nwis/index - /autoapi/pygeohydro/plot/index - /autoapi/pygeohydro/pygeohydro/index - /autoapi/pygeohydro/stnfloodevents/index - /autoapi/pygeohydro/us_abbrs/index - /autoapi/pygeohydro/waterdata/index - /autoapi/pygeohydro/watershed/index - - diff --git a/docs/source/autoapi/pygeohydro/levee/index.rst b/docs/source/autoapi/pygeohydro/levee/index.rst deleted file mode 100644 index 8f70d2a..0000000 --- a/docs/source/autoapi/pygeohydro/levee/index.rst +++ /dev/null @@ -1,71 +0,0 @@ -pygeohydro.levee -================ - -.. py:module:: pygeohydro.levee - -.. autoapi-nested-parse:: - - Accessing National Flood Hazard Layers (NLD) through web services. - - - - - -Module Contents ---------------- - -.. py:class:: NLD(layer, outfields = '*', crs = 4326) - - - - Access National Levee Database (NLD) services. - - .. rubric:: Notes - - For more info visit: https://geospatial.sec.usace.army.mil/server/rest/services/NLD2_PUBLIC/FeatureServer - - :Parameters: * **layer** (:class:`str`, *optional*) -- A valid service layer. Valid layers are: - - - ``boreholes`` - - ``crossings`` - - ``levee_stations`` - - ``piezometers`` - - ``pump_stations`` - - ``relief_wells`` - - ``alignment_lines`` - - ``closure_structures`` - - ``cross_sections`` - - ``embankments`` - - ``floodwalls`` - - ``frm_lines`` - - ``pipe_gates`` - - ``toe_drains`` - - ``leveed_areas`` - - ``system_routes`` - - ``pipes`` - - ``channels`` - * **outfields** (:class:`str` or :class:`list`, *optional*) -- Target field name(s), default to "*" i.e., all the fields. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- Target spatial reference, default to ``EPSG:4326``. - - .. 
method:: bygeom(geom, geo_crs=4326, sql_clause="", distance=None, return_m=False, return_geom=True) - - Get features within a geometry that can be combined with a SQL where clause. - - .. method:: byids(field, fids, return_m=False, return_geom=True) - - Get features by object IDs. - - .. method:: bysql(sql_clause, return_m=False, return_geom=True) - - Get features using a valid SQL 92 WHERE clause. - - - .. rubric:: Examples - - >>> from pygeohydro import NLD - >>> nld = NLD("levee_stations") - >>> levees = nld.bygeom((-105.914551, 37.437388, -105.807434, 37.522392)) - >>> levees.shape - (1838, 12) - - diff --git a/docs/source/autoapi/pygeohydro/nfhl/index.rst b/docs/source/autoapi/pygeohydro/nfhl/index.rst deleted file mode 100644 index d8d23c0..0000000 --- a/docs/source/autoapi/pygeohydro/nfhl/index.rst +++ /dev/null @@ -1,120 +0,0 @@ -pygeohydro.nfhl -=============== - -.. py:module:: pygeohydro.nfhl - -.. autoapi-nested-parse:: - - Accessing National Flood Hazard Layers (NFHL) through web services. - - - - - -Module Contents ---------------- - -.. py:class:: NFHL(service, layer, outfields = '*', crs = 4326) - - - - Access National Flood Hazard Layers (NFHL). - - :Parameters: * **service** (:class:`str`) -- The service type. Valid services are: - - - ``NFHL``: Effective National Flood Hazard Layers - - ``Prelim_CSLF``: Preliminary Changes Since Last Firm (CSLF) - - ``Draft_CSLF``: Draft Changes Since Last Firm (CSLF) - - ``Prelim_NFHL``: Preliminary National Flood Hazard Layers - - ``Pending_NFHL``: Pending National Flood Hazard Layers - - ``Draft_NFHL``: Draft National Flood Hazard Layers - * **layer** (:class:`str`) -- A valid service layer. 
Valid layers are service specific: - - - ``NFHL``: ``nfhl availability``, ``firm panels``, ``lomrs``, ``lomas``, - ``political jurisdictions``, ``profile baselines``, ``water lines``, - ``cross-sections``, ``base flood elevations``, ``levees``, - ``seclusion boundaries``, ``coastal transects``, ``transect baselines``, - ``general structures``, ``river mile markers``, ``water areas``, ``plss``, - ``limit of moderate wave action``, ``flood hazard boundaries``, - ``flood hazard zones``, ``primary frontal dunes``, ``base index``, - ``topographic low confidence areas``, ``datum conversion points``, - ``coastal gages``, ``gages``, ``nodes``, ``high water marks``, - ``station start points``, ``hydrologic reaches``, ``alluvial fans``, - and ``subbasins`` - - ``Prelim_CSLF``: ``preliminary``, ``coastal high hazard area change``, - ``floodway change``, ``special flood hazard area change``, - and ``non-special flood hazard area change`` - - ``Draft_CSLF``: ``draft``, ``coastal high hazard area change``, - ``floodway change``, ``special flood hazard area change``, and - ``non-special flood hazard area change`` - - ``Prelim_NFHL``: ``preliminary data availability``, - ``preliminary firm panel index``, ``preliminary plss``, - ``preliminary topographic low confidence areas``, - ``preliminary river mile markers``, ``preliminary datum conversion points``, - ``preliminary coastal gages``, ``preliminary gages``, ``preliminary nodes``, - ``preliminary high water marks``, ``preliminary station start points``, - ``preliminary cross-sections``, ``preliminary coastal transects``, - ``preliminary base flood elevations``, ``preliminary profile baselines``, - ``preliminary transect baselines``, ``preliminary limit of moderate wave action``, - ``preliminary water lines``, ``preliminary political jurisdictions``, - ``preliminary levees``, ``preliminary general structures``, - ``preliminary primary frontal dunes``, ``preliminary hydrologic reaches``, - ``preliminary flood hazard boundaries``, 
``preliminary flood hazard zones``, - ``preliminary submittal information``, ``preliminary alluvial fans``, - ``preliminary subbasins``, and ``preliminary water areas`` - - ``Pending_NFHL``: ``pending submittal information``, ``pending water areas``, - ``pending firm panel index``, ``pending data availability``, - ``pending firm panels``, ``pending political jurisdictions``, - ``pending profile baselines``, ``pending water lines``, - ``pending cross-sections``, ``pending base flood elevations``, - ``pending levees``, ``pending seclusion boundaries``, - ``pending coastal transects``, ``pending transect baselines``, - ``pending general structures``, ``pending river mile markers``, - ``pending plss``, ``pending limit of moderate wave action``, - ``pending flood hazard boundaries``, ``pending flood hazard zones``, - ``pending primary frontal dunes``, ``pending topographic low confidence areas``, - ``pending datum conversion points``, ``pending coastal gages``, - ``pending gages``, ``pending nodes``, ``pending high water marks``, - ``pending station start points``, ``pending hydrologic reaches``, - ``pending alluvial fans``, and ``pending subbasins`` - - ``Draft_NFHL``: ``draft data availability``, ``draft firm panels``, - ``draft political jurisdictions``, ``draft profile baselines``, - ``draft water lines``, ``draft cross-sections``, ``draft base flood elevations``, - ``draft levees``, ``draft submittal info``, ``draft coastal transects``, - ``draft transect baselines``, ``draft general structures``, - ``draft limit of moderate wave action``, ``draft flood hazard boundaries``, - and ``draft flood hazard zones`` - * **outfields** (:class:`str` or :class:`list`, *optional*) -- Target field name(s), default to "*" i.e., all the fields. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- Target spatial reference of output, default to ``EPSG:4326``. - - .. 
rubric:: Examples - - >>> from pygeohydro import NFHL - >>> nfhl = NFHL("NFHL", "cross-sections") - >>> gdf_xs = nfhl.bygeom((-73.42, 43.28, -72.9, 43.52), geo_crs=4269) - - .. rubric:: References - - * `National Flood Hazard Layer `__ - - .. method:: bygeom(geom, geo_crs=4326, sql_clause="", distance=None, return_m=False, return_geom=True) - - Get features within a geometry that can be combined with a SQL where clause. - - .. method:: byids(field, fids, return_m=False, return_geom=True) - - Get features by object IDs. - - .. method:: bysql(sql_clause, return_m=False, return_geom=True) - - Get features using a valid SQL 92 WHERE clause. - - - - .. py:property:: valid_services - :type: dict[str, str] - - - A dictionary of valid services and their URLs. - - diff --git a/docs/source/autoapi/pygeohydro/nid/index.rst b/docs/source/autoapi/pygeohydro/nid/index.rst deleted file mode 100644 index 8e41b37..0000000 --- a/docs/source/autoapi/pygeohydro/nid/index.rst +++ /dev/null @@ -1,138 +0,0 @@ -pygeohydro.nid -============== - -.. py:module:: pygeohydro.nid - -.. autoapi-nested-parse:: - - Accessing data from the supported databases through their APIs. - - - - - -Module Contents ---------------- - -.. py:class:: NID - - Retrieve data from the National Inventory of Dams web service. - - - .. py:method:: get_byfilter(query_list) - - Query dams by filters from the National Inventory of Dams web service. - - :Parameters: **query_list** (:class:`list` of :class:`dict`) -- List of dictionary of query parameters. For an exhaustive list of the parameters, - use the advanced fields dataframe that can be accessed via ``NID().fields_meta``. - Some filter require min/max values such as ``damHeight`` and ``drainageArea``. - For such filters, the min/max values should be passed like so: - ``{filter_key: ["[min1 max1]", "[min2 max2]"]}``. - - :returns: :class:`list` of :class:`geopandas.GeoDataFrame` -- Query results in the same order as the input query list. - - .. 
rubric:: Examples - - >>> from pygeohydro import NID - >>> nid = NID() - >>> query_list = [ - ... {"drainageArea": ["[200 500]"]}, - ... {"nidId": ["CA01222"]}, - ... ] - >>> dam_dfs = nid.get_byfilter(query_list) - - - - .. py:method:: get_bygeom(geometry, geo_crs) - - Retrieve NID data within a geometry. - - :Parameters: * **geometry** (:class:`Polygon`, :class:`MultiPolygon`, or :class:`tuple` of :class:`length 4`) -- Geometry or bounding box (west, south, east, north) for extracting the data. - * **geo_crs** (:class:`list` of :class:`str`) -- The CRS of the input geometry. - - :returns: :class:`geopandas.GeoDataFrame` -- GeoDataFrame of NID data - - .. rubric:: Examples - - >>> from pygeohydro import NID - >>> nid = NID() - >>> dams = nid.get_bygeom((-69.77, 45.07, -69.31, 45.45), 4326) - - - - .. py:method:: get_suggestions(text, context_key = None) - - Get suggestions from the National Inventory of Dams web service. - - .. rubric:: Notes - - This function is useful for exploring and/or narrowing down the filter fields - that are needed to query the dams using ``get_byfilter``. - - :Parameters: * **text** (:class:`str`) -- Text to query for suggestions. - * **context_key** (:class:`str`, *optional*) -- Suggestion context, defaults to empty string, i.e., all context keys. - For a list of valid context keys, see ``NID().fields_meta``. - - :returns: :class:`tuple` of :class:`pandas.DataFrame` -- The suggestions for the requested text as two DataFrames: - First, is suggestions found in the dams properties and - second, those found in the query fields such as states, huc6, etc. - - .. rubric:: Examples - - >>> from pygeohydro import NID - >>> nid = NID() - >>> dams, contexts = nid.get_suggestions("houston", "city") - - - - .. py:method:: inventory_byid(federal_ids) - - Get extra attributes for dams based on their dam ID. - - .. rubric:: Notes - - This function is meant to be used for getting extra attributes for dams. 
- For example, first you need to use either ``get_bygeom`` or ``get_byfilter`` - to get basic attributes of the target dams. Then you can use this function - to get extra attributes using the ``id`` column of the ``GeoDataFrame`` - that ``get_bygeom`` or ``get_byfilter`` returns. - - :Parameters: **federal_ids** (:class:`list` of :class:`str`) -- List of the target dam Federal IDs. - - :returns: :class:`pandas.DataFrame` -- Dams with extra attributes in addition to the standard NID fields - that other ``NID`` methods return. - - .. rubric:: Examples - - >>> from pygeohydro import NID - >>> nid = NID() - >>> dams = nid.inventory_byid(['KY01232', 'GA02400', 'NE04081', 'IL55070', 'TN05345']) - - - - .. py:method:: stage_nid_inventory(fname = None) - - Download the entire NID inventory data and save to a parquet file. - - :Parameters: **fname** (:class:`str`, :class:`pathlib.Path`, *optional*) -- The path to the file to save the data to, defaults to - ``./cache/full_nid_inventory.parquet``. - - - - .. py:property:: df - - Entire NID inventory (``csv`` version) as a ``pandas.DataFrame``. - - - .. py:property:: gdf - - Entire NID inventory (``gpkg`` version) as a ``geopandas.GeoDataFrame``. - - - .. py:property:: nid_inventory_path - :type: pathlib.Path - - - Path to the NID inventory parquet file. - - diff --git a/docs/source/autoapi/pygeohydro/nlcd/index.rst b/docs/source/autoapi/pygeohydro/nlcd/index.rst deleted file mode 100644 index 1481c91..0000000 --- a/docs/source/autoapi/pygeohydro/nlcd/index.rst +++ /dev/null @@ -1,103 +0,0 @@ -pygeohydro.nlcd -=============== - -.. py:module:: pygeohydro.nlcd - -.. autoapi-nested-parse:: - - Accessing data from the supported databases through their APIs. - - - - - -Module Contents ---------------- - -.. py:function:: cover_statistics(cover_da) - - Percentages of the categorical NLCD cover data. 
- - :Parameters: **cover_da** (:class:`xarray.DataArray`) -- Land cover DataArray from a LULC Dataset from the ``nlcd_bygeom`` function. - - :returns: :class:`Stats` -- A named tuple with the percentages of the cover classes and categories. - - -.. py:function:: nlcd_area_percent(geo_df, year = 2019, region = 'L48') - - Compute the area percentages of the natural, developed, and impervious areas. - - .. rubric:: Notes - - This function uses imperviousness and land use/land cover data from NLCD - to compute the area percentages of the natural, developed, and impervious areas. - It considers land cover classes of 21 to 24 as urban and the rest as natural. - Then, uses imperviousness percentage to partition the urban area into developed - and impervious areas. So, ``urban = developed + impervious`` and always - ``natural + urban = natural + developed + impervious = 100``. - - :Parameters: * **geo_df** (:class:`geopandas.GeoDataFrame` or :class:`geopandas.GeoSeries`) -- A GeoDataFrame or GeoSeries with the geometry to query. The indices are used - as keys in the output dictionary. - * **year** (:class:`int`, *optional*) -- Year of the NLCD data, defaults to 2019. Available years are 2021, 2019, 2016, - 2013, 2011, 2008, 2006, 2004, and 2001. - * **region** (:class:`str`, *optional*) -- Region in the US that the input geometries are located, defaults to ``L48``. - Valid values are ``L48`` (for CONUS), ``HI`` (for Hawaii), ``AK`` (for Alaska), - and ``PR`` (for Puerto Rico). Both lower and upper cases are acceptable. - - :returns: :class:`pandas.DataFrame` -- A dataframe with the same index as input ``geo_df`` and columns are the area - percentages of the natural, developed, impervious, and urban - (sum of developed and impervious) areas. Sum of urban and natural percentages - is always 100, as well as the sum of natural, developed, and impervious - percentages. - - -.. 
py:function:: nlcd_bycoords(coords, years = None, region = 'L48', ssl = True) - - Get data from NLCD database (2019). - - :Parameters: * **coords** (:class:`list` of :class:`tuple`) -- List of coordinates in the form of (longitude, latitude). - * **years** (:class:`dict`, *optional*) -- The years for NLCD layers as a dictionary, defaults to - ``{'impervious': [2019], 'cover': [2019], 'canopy': [2019], "descriptor": [2019]}``. - Layers that are not in years are ignored, e.g., ``{'cover': [2016, 2019]}`` returns - land cover data for 2016 and 2019. - * **region** (:class:`str`, *optional*) -- Region in the US that the input geometries are located, defaults to ``L48``. - Valid values are ``L48`` (for CONUS), ``HI`` (for Hawaii), ``AK`` (for Alaska), - and ``PR`` (for Puerto Rico). Both lower and upper cases are acceptable. - * **ssl** (:class:`bool`, *optional*) -- Whether to use SSL for the connection, defaults to ``True``. - - :returns: :class:`geopandas.GeoDataFrame` -- A GeoDataFrame with the NLCD data and the coordinates. - - -.. py:function:: nlcd_bygeom(geometry, resolution = 30, years = None, region = 'L48', crs = 4326, ssl = True) - - Get data from NLCD database (2019). - - :Parameters: * **geometry** (:class:`geopandas.GeoDataFrame` or :class:`geopandas.GeoSeries`) -- A GeoDataFrame or GeoSeries with the geometry to query. The indices are used - as keys in the output dictionary. - * **resolution** (:class:`float`, *optional*) -- The data resolution in meters. The width and height of the output are computed in pixel - based on the geometry bounds and the given resolution. The default is 30 m which is the - native resolution of NLCD data. - * **years** (:class:`dict`, *optional*) -- The years for NLCD layers as a dictionary, defaults to - ``{'impervious': [2019], 'cover': [2019], 'canopy': [2019], "descriptor": [2019]}``. - Layers that are not in years are ignored, e.g., ``{'cover': [2016, 2019]}`` returns - land cover data for 2016 and 2019. 
- * **region** (:class:`str`, *optional*) -- Region in the US that the input geometries are located, defaults to ``L48``. - Valid values are ``L48`` (for CONUS), ``HI`` (for Hawaii), ``AK`` (for Alaska), - and ``PR`` (for Puerto Rico). Both lower and upper cases are acceptable. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The spatial reference system to be used for requesting the data, defaults to - ``epsg:4326``. - * **ssl** (:class:`bool`, *optional*) -- Whether to use SSL for the connection, defaults to ``True``. - - :returns: :class:`dict` of :class:`xarray.Dataset` or :class:`xarray.Dataset` -- A single or a ``dict`` of NLCD datasets. If dict, the keys are indices - of the input ``GeoDataFrame``. - - -.. py:function:: overland_roughness(cover_da) - - Estimate overland roughness from land cover data. - - :Parameters: **cover_da** (:class:`xarray.DataArray`) -- Land cover DataArray from a LULC Dataset from the ``nlcd_bygeom`` function. - - :returns: :class:`xarray.DataArray` -- Overland roughness - - diff --git a/docs/source/autoapi/pygeohydro/nwis/index.rst b/docs/source/autoapi/pygeohydro/nwis/index.rst deleted file mode 100644 index 916f61f..0000000 --- a/docs/source/autoapi/pygeohydro/nwis/index.rst +++ /dev/null @@ -1,134 +0,0 @@ -pygeohydro.nwis -=============== - -.. py:module:: pygeohydro.nwis - -.. autoapi-nested-parse:: - - Accessing NWIS. - - - - - - - -Module Contents ---------------- - -.. py:class:: NWIS - - Access NWIS web service. - - .. rubric:: Notes - - More information about query parameters and codes that NWIS accepts - can be found at its help - `webpage `__. - - - .. py:method:: get_info(queries, expanded = False, fix_names = True, nhd_info = False) - :classmethod: - - - Send multiple queries to USGS Site Web Service. - - :Parameters: * **queries** (:class:`dict` or :class:`list` of :class:`dict`) -- A single or a list of valid queries. 
- * **expanded** (:class:`bool`, *optional*) -- Whether to get expanded site information for example drainage area, - default to False. - * **fix_names** (:class:`bool`, *optional*) -- If ``True``, reformat station names and some small annoyances, - defaults to ``True``. - * **nhd_info** (:class:`bool`, *optional*) -- If ``True``, get NHD information for each site, defaults to ``False``. - This will add four new columns: ``nhd_comid``, ``nhd_areasqkm``, - ``nhd_reachcode``, and ``nhd_measure``. Where ``nhd_id`` is the NHD - COMID of the flowline that the site is located in, ``nhd_reachcode`` - is the NHD Reach Code that the site is located in, and ``nhd_measure`` - is the measure along the flowline that the site is located at. - - :returns: :class:`geopandas.GeoDataFrame` -- A correctly typed ``GeoDataFrame`` containing site(s) information. - - - - .. py:method:: get_parameter_codes(keyword) - :classmethod: - - - Search for parameter codes by name or number. - - .. rubric:: Notes - - NWIS guideline for keywords is as follows: - - By default an exact search is made. To make a partial search the term - should be prefixed and suffixed with a % sign. The % sign matches zero - or more characters at the location. For example, to find all with "discharge" - enter %discharge% in the field. % will match any number of characters - (including zero characters) at the location. - - :Parameters: **keyword** (:class:`str`) -- Keyword to search for parameters by name or number. - - :returns: :class:`pandas.DataFrame` -- Matched parameter codes as a dataframe with their description. - - .. rubric:: Examples - - >>> from pygeohydro import NWIS - >>> nwis = NWIS() - >>> codes = nwis.get_parameter_codes("%discharge%") - >>> codes.loc[codes.parameter_cd == "00060", "parm_nm"].iloc[0] - 'Discharge, cubic feet per second' - - - - .. 
py:method:: get_streamflow(station_ids: collections.abc.Sequence[str] | str, dates: tuple[str, str], freq: str = 'dv', mmd: bool = False, *, to_xarray: Literal[False] = False) -> pandas.DataFrame - get_streamflow(station_ids: collections.abc.Sequence[str] | str, dates: tuple[str, str], freq: str = 'dv', mmd: bool = False, *, to_xarray: Literal[True]) -> xarray.Dataset - :classmethod: - - - Get mean daily streamflow observations from USGS. - - :Parameters: * **station_ids** (:class:`str`, :class:`list`) -- The gage ID(s) of the USGS station. - * **dates** (:class:`tuple`) -- Start and end dates as a tuple (start, end). - * **freq** (:class:`str`, *optional*) -- The frequency of the streamflow data, defaults to ``dv`` (daily values). - Valid frequencies are ``dv`` (daily values), ``iv`` (instantaneous values). - Note that for ``iv`` the time zone for the input dates is assumed to be UTC. - * **mmd** (:class:`bool`, *optional*) -- Convert cms to mm/day based on the contributing drainage area of the stations. - Defaults to False. - * **to_xarray** (:class:`bool`, *optional*) -- Whether to return a xarray.Dataset. Defaults to False. - - :returns: :class:`pandas.DataFrame` or :class:`xarray.Dataset` -- Streamflow data observations in cubic meter per second (cms). The stations that - don't provide the requested discharge data in the target period will be dropped. - Note that when frequency is set to ``iv`` the time zone is converted to UTC. - - - - .. py:method:: retrieve_rdb(url, payloads) - :staticmethod: - - - Retrieve and process requests with RDB format. - - :Parameters: * **url** (:class:`str`) -- Name of USGS REST service, valid values are ``site``, ``dv``, ``iv``, - ``gwlevels``, and ``stat``. Please consult USGS documentation - `here `__ for more information. - * **payloads** (:class:`list` of :class:`dict`) -- List of target payloads. - - :returns: :class:`pandas.DataFrame` -- Requested features as a pandas's DataFrame. - - - -.. 
py:function:: streamflow_fillna(streamflow, missing_max = 5) - - Fill missing data (NAN) in daily streamflow observations. - - It drops stations with more than ``missing_max`` days missing data - per year. Missing data in the remaining stations, are filled with - day-of-year average over the entire dataset. - - :Parameters: * **streamflow** (:class:`xarray.DataArray` or :class:`pandas.DataFrame` or :class:`pandas.Series`) -- Daily streamflow observations with at least 10 years of daily data. - * **missing_max** (:class:`int`) -- Maximum allowed number of missing daily data per year for filling, - defaults to 5. - - :returns: :class:`xarray.DataArray` or :class:`pandas.DataFrame` or :class:`pandas.Series` -- Streamflow observations with missing data filled for stations with - less than ``missing_max`` days of missing data. - - diff --git a/docs/source/autoapi/pygeohydro/plot/index.rst b/docs/source/autoapi/pygeohydro/plot/index.rst deleted file mode 100644 index d93edb9..0000000 --- a/docs/source/autoapi/pygeohydro/plot/index.rst +++ /dev/null @@ -1,48 +0,0 @@ -pygeohydro.plot -=============== - -.. py:module:: pygeohydro.plot - -.. autoapi-nested-parse:: - - Plot hydrological signatures. - - Plots include daily, monthly and annual hydrograph as well as regime - curve (monthly mean) and flow duration curve. - - - - - -Module Contents ---------------- - -.. py:function:: prepare_plot_data(daily) - - Generate a structured data for plotting hydrologic signatures. - - :Parameters: **daily** (:class:`pandas.Series` or :class:`pandas.DataFrame`) -- The data to be processed - - :returns: :class:`PlotDataType` -- Containing ``daily, ``mean_monthly``, ``ranked``, ``titles``, - and ``units`` fields. - - -.. py:function:: signatures(discharge, precipitation = None, title = None, figsize = None, output = None, close = False) - - Plot hydrological signatures w/ or w/o precipitation. - - Plots includes daily hydrograph, regime curve (mean monthly) and - flow duration curve. 
The input discharges are converted from cms - to mm/day based on the watershed area, if provided. - - :Parameters: * **discharge** (:class:`pd.DataFrame` or :class:`pd.Series`) -- The streamflows in mm/day. The column names are used as labels - on the plot and the column values should be daily streamflow. - * **precipitation** (:class:`pd.Series`, *optional*) -- Daily precipitation time series in mm/day. If given, the data is - plotted on the second x-axis at the top. - * **title** (:class:`str`, *optional*) -- The plot supertitle. - * **figsize** (:class:`tuple`, *optional*) -- The figure size in inches, defaults to (9, 5). - * **output** (:class:`str`, *optional*) -- Path to save the plot as png, defaults to ``None`` which means - the plot is not saved to a file. - * **close** (:class:`bool`, *optional*) -- Whether to close the figure. - - diff --git a/docs/source/autoapi/pygeohydro/pygeohydro/index.rst b/docs/source/autoapi/pygeohydro/pygeohydro/index.rst deleted file mode 100644 index 2e8e49c..0000000 --- a/docs/source/autoapi/pygeohydro/pygeohydro/index.rst +++ /dev/null @@ -1,164 +0,0 @@ -pygeohydro.pygeohydro -===================== - -.. py:module:: pygeohydro.pygeohydro - -.. autoapi-nested-parse:: - - Accessing data from the supported databases through their APIs. - - - - - - - -Module Contents ---------------- - -.. py:class:: EHydro(data_type = 'points', cache_dir = 'ehydro_cache') - - - - Access USACE Hydrographic Surveys (eHydro). - - .. rubric:: Notes - - For more info visit: https://navigation.usace.army.mil/Survey/Hydro - - .. method:: bygeom(geom, geo_crs=4326, sql_clause="", distance=None, return_m=False, return_geom=True) - - Get features within a geometry that can be combined with a SQL where clause. - - .. method:: byids(field, fids, return_m=False, return_geom=True) - - Get features by object IDs. - - .. method:: bysql(sql_clause, return_m=False, return_geom=True) - - Get features using a valid SQL 92 WHERE clause. 
- - - :Parameters: * **data_type** (:class:`str`, *optional*) -- Type of the survey data to retrieve, defaults to ``points``. - Note that the ``points`` data type gets the best available point - cloud data, i.e., if ``SurveyPointHD`` is available, it will be - returned, otherwise ``SurveyPoint`` will be returned. - Available types are: - - - ``points``: Point clouds - - ``outlines``: Polygons of survey outlines - - ``contours``: Depth contours - - ``bathymetry``: Bathymetry data - - Note that point clouds are not available for all surveys. - * **cache_dir** (:class:`str` or :class:`pathlib.Path`, *optional*) -- Directory to store the downloaded raw data, defaults to ``./ehydro_cache``. - - - .. py:property:: survey_grid - :type: geopandas.GeoDataFrame - - - Full survey availability on hexagonal grid cells of 35 km resolution. - - -.. py:function:: get_camels() - - Get streamflow and basin attributes of all 671 stations in CAMELS dataset. - - .. rubric:: Notes - - For more info on CAMELS visit: https://ral.ucar.edu/solutions/products/camels - - :returns: :class:`tuple` of :class:`geopandas.GeoDataFrame` and :class:`xarray.Dataset` -- The first is basin attributes as a ``geopandas.GeoDataFrame`` and the second - is streamflow data and basin attributes as an ``xarray.Dataset``. - - -.. py:function:: soil_gnatsgo(layers, geometry, crs = 4326) - - Get US soil data from the gNATSGO dataset. - - .. rubric:: Notes - - This function uses Microsoft's Planetary Computer service to get the data. - The dataset's description and its supported soil properties can be found at: - https://planetarycomputer.microsoft.com/dataset/gnatsgo-rasters - - :Parameters: * **layers** (:class:`list` of :class:`str` or :class:`str`) -- Target layer(s). Available layers can be found at the dataset's website - `here `__. - * **geometry** (:class:`Polygon`, :class:`MultiPolygon`, or :class:`tuple` of :class:`length 4`) -- Geometry or bounding box of the region of interest. 
- * **crs** (:class:`int`, :class:`str`, or :class:`pyproj.CRS`, *optional*) -- The input geometry CRS, defaults to ``epsg:4326``. - - :returns: :class:`xarray.Dataset` -- Requested soil properties. - - -.. py:function:: soil_properties(properties = '*', soil_dir = 'cache') - - Get soil properties dataset in the United States from ScienceBase. - - .. rubric:: Notes - - This function downloads the source zip files from - `ScienceBase `__ - , extracts the included ``.tif`` files, and returns them as an ``xarray.Dataset``. - - :Parameters: * **properties** (:class:`list` of :class:`str` or :class:`str`, *optional*) -- Soil properties to extract, default to "*", i.e., all the properties. - Available properties are ``awc`` for available water capacity, ``fc`` for - field capacity, and ``por`` for porosity. - * **soil_dir** (:class:`str` or :class:`pathlib.Path`) -- Directory to store zip files or if exists read from them, defaults to - ``./cache``. - - -.. py:function:: soil_soilgrids(layers, geometry, geo_crs = 4326) - - Get soil data from SoilGrids for the area of interest. - - .. rubric:: Notes - - For more information on the SoilGrids dataset, visit - `ISRIC `__. - - :Parameters: * **layers** (:class:`list` of :class:`str`) -- SoilGrids layers to get. Available options are: - ``bdod_*``, ``cec_*``, ``cfvo_*``, ``clay_*``, ``nitrogen_*``, ``ocd_*``, - ``ocs_*``, ``phh2o_*``, ``sand_*``, ``silt_*``, and ``soc_*`` where ``*`` - is the depth in cm and can be one of ``5``, ``15``, ``30``, ``60``, - ``100``, or ``200``. For example, ``bdod_5`` is the mean bulk density of - the fine earth fraction at 0-5 cm depth, and ``bdod_200`` is the mean bulk - density of the fine earth fraction at 100-200 cm depth. - * **geometry** (:class:`Polygon`, :class:`MultiPolygon`, or :class:`tuple` of :class:`length 4`) -- Geometry to get data within. It can be a polygon or a bounding box - of form (xmin, ymin, xmax, ymax). 
- * **geo_crs** (:class:`int`, :class:`str`, or :class:`pyproj.CRS`, *optional*) -- CRS of the input geometry, defaults to ``epsg:4326``. - - :returns: :class:`xarray.DataArray` -- The requested soil data at the specified resolution. - - -.. py:function:: ssebopeta_bycoords(coords, dates, crs = 4326) - - Daily actual ET for a dataframe of coords from SSEBop database in mm/day. - - :Parameters: * **coords** (:class:`pandas.DataFrame`) -- A dataframe with ``id``, ``x``, ``y`` columns. - * **dates** (:class:`tuple` or :class:`list`, *optional*) -- Start and end dates as a tuple (start, end) or a list of years [2001, 2010, ...]. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The CRS of the input coordinates, defaults to ``epsg:4326``. - - :returns: :class:`xarray.Dataset` -- Daily actual ET in mm/day as a dataset with ``time`` and ``location_id`` dimensions. - The ``location_id`` dimension is the same as the ``id`` column in the input dataframe. - - -.. py:function:: ssebopeta_bygeom(geometry, dates, geo_crs = 4326) - - Get daily actual ET for a region from SSEBop database. - - .. rubric:: Notes - - Since there's still no web service available for subsetting SSEBop, the data first - needs to be downloaded for the requested period then it is masked by the - region of interest locally. Therefore, it's not as fast as other functions and - the bottleneck could be the download speed. - - :Parameters: * **geometry** (:class:`shapely.Polygon` or :class:`tuple`) -- The geometry for downloading and clipping the data. For a tuple bbox, - the order should be (west, south, east, north). - * **dates** (:class:`tuple` or :class:`list`, *optional*) -- Start and end dates as a tuple (start, end) or a list of years [2001, 2010, ...]. - * **geo_crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The CRS of the input geometry, defaults to ``epsg:4326``. 
- - :returns: :class:`xarray.DataArray` -- Daily actual ET within a geometry in mm/day at 1 km resolution - - diff --git a/docs/source/autoapi/pygeohydro/stnfloodevents/index.rst b/docs/source/autoapi/pygeohydro/stnfloodevents/index.rst deleted file mode 100644 index e802a16..0000000 --- a/docs/source/autoapi/pygeohydro/stnfloodevents/index.rst +++ /dev/null @@ -1,287 +0,0 @@ -pygeohydro.stnfloodevents -========================= - -.. py:module:: pygeohydro.stnfloodevents - -.. autoapi-nested-parse:: - - Access USGS Short-Term Network (STN) via Restful API. - - - - - - - -Module Contents ---------------- - -.. py:class:: STNFloodEventData - - Client for STN Flood Event Data's RESTFUL Service API. - - Advantages of using this client are: - - - The user does not need to know the details of RESTFUL in - general and of this API specifically. - - Parses the data and returns Python objects - (e.g., pandas.DataFrame, geopandas.GeoDataFrame) instead of JSON. - - Convenience functions are offered for data dictionaries. - - Geo-references the data where applicable. - - .. attribute:: service_url - - The service url of the STN Flood Event Data RESTFUL Service API. - - :type: :class:`str` - - .. attribute:: data_dictionary_url - - The data dictionary url of the STN Flood Event Data RESTFUL Service API. - - :type: :class:`str` - - .. attribute:: service_crs - - The CRS of the data from the service which is ``EPSG:4326``. - - :type: :class:`int` - - .. attribute:: instruments_query_params - - The accepted query parameters for the instruments data type. - Accepted values are ``SensorType``, ``CurrentStatus``, ``States``, - ``Event``, ``County``, ``DeploymentType``, ``EventType``, - ``EventStatus``, and ``CollectionCondition``. - - :type: :class:`set` - - .. attribute:: peaks_query_params - - The accepted query parameters for the peaks data type. - Accepted values are ``EndDate``, ``States``, ``Event``, ``StartDate``, - ``County``, ``EventType``, and ``EventStatus``. 
- - :type: :class:`set` - - .. attribute:: hwms_query_params - - The accepted query parameters for the hwms data type. - Accepted values are ``EndDate``, ``States``, ``Event``, ``StartDate``, - ``County``, ``EventType``, and ``EventStatus``. - - :type: :class:`set` - - .. attribute:: sites_query_params - - The accepted query parameters for the sites data type. - Accepted values are ``OPDefined``, ``HousingTypeOne``, ``NetworkName``, - ``HousingTypeSeven``, ``RDGOnly``, ``HWMOnly``, ``Event``, - ``SensorOnly``, ``State``, ``SensorType``, and ``HWMSurveyed``. - - :type: :class:`set` - - .. rubric:: Notes - - Point data from the service is assumed to be in the WGS84 - coordinate reference system (``EPSG:4326``). - - .. rubric:: References - - * `USGS Short-Term Network (STN) `__ - * `All Sensors API Documentation `__ - * `All Peak Summary API Documentation `__ - * `All HWM API Documentation `__ - * `All Sites API Documentation `__ - * `USGS Flood Event Viewer: Providing Hurricane and Flood Response Data `__ - * `A USGS guide for finding and interpreting high-water marks `__ - * `High-Water Marks and Flooding `__ - * `Identifying and preserving high-water mark data `__ - - - .. py:method:: get_all_data(data_type: str, *, as_list: Literal[False] = False, crs: CRSType = 4326, async_retriever_kwargs: dict[str, Any] | None = None) -> geopandas.GeoDataFrame | pandas.DataFrame - get_all_data(data_type: str, *, as_list: Literal[True], crs: CRSType = 4326, async_retriever_kwargs: dict[str, Any] | None = None) -> list[dict[str, Any]] - :classmethod: - - - Retrieve all data from the STN Flood Event Data API. - - :Parameters: * **data_type** (:class:`str`) -- The data source from STN Flood Event Data API. - It can be ``instruments``, ``peaks``, ``hwms``, or ``sites``. - * **as_list** (:class:`bool`, *optional*) -- If True, return the data as a list, defaults to False. 
- * **crs** (:class:`int`, :class:`str`, or :class:`pyproj.CRS`, *optional*) -- Desired Coordinate reference system (CRS) of output. - Only used for GeoDataFrames with ``hwms`` and ``sites`` data types. - * **async_retriever_kwargs** (:class:`dict`, *optional*) -- Additional keyword arguments to pass to - ``async_retriever.retrieve_json()``. The ``url`` and ``request_kwds`` - options are already set. - - :returns: :class:`geopandas.GeoDataFrame` or :class:`pandas.DataFrame` or :class:`list` of :class:`dict` -- The retrieved data as a GeoDataFrame, DataFrame, or a list of dictionaries. - - :raises InputValueError: If the input data_type is not one of - ``instruments``, ``peaks``, ``hwms``, or ``sites`` - - .. seealso:: - - :meth:`~get_filtered_data` - Retrieves filtered data for a given data type. - - :meth:`~data_dictionary` - Retrieves the data dictionary for a given data type. - - .. rubric:: Notes - - Notice schema differences between the data dictionaries, filtered data - queries, and all data queries. This is a known issue and is being addressed - by USGS. - - .. rubric:: Examples - - >>> from pygeohydro.stnfloodevents import STNFloodEventData - >>> data = STNFloodEventData.get_all_data(data_type="instruments") - >>> data.shape[1] - 18 - >>> data.columns - Index(['instrument_id', 'sensor_type_id', 'deployment_type_id', - 'location_description', 'serial_number', 'interval', 'site_id', - 'event_id', 'inst_collection_id', 'housing_type_id', 'sensor_brand_id', - 'vented', 'instrument_status', 'data_files', 'files', 'last_updated', - 'last_updated_by', 'housing_serial_number'], - dtype='object') - - - - .. 
py:method:: get_filtered_data(data_type: str, query_params: dict[str, Any] | None = None, *, as_list: Literal[False] = False, crs: CRSType = 4326, async_retriever_kwargs: dict[str, Any] | None = None) -> geopandas.GeoDataFrame | pandas.DataFrame - get_filtered_data(data_type: str, query_params: dict[str, Any] | None = None, *, as_list: Literal[True], crs: CRSType = 4326, async_retriever_kwargs: dict[str, Any] | None = None) -> list[dict[str, Any]] - :classmethod: - - - Retrieve filtered data from the STN Flood Event Data API. - - :Parameters: * **data_type** (:class:`str`) -- The data source from STN Flood Event Data API. - It can be ``instruments``, ``peaks``, ``hwms``, or ``sites``. - * **query_params** (:class:`dict`, *optional*) -- RESTFUL API query parameters. For accepted values, see - the STNFloodEventData class attributes :attr:`~instruments_query_params`, - :attr:`~peaks_query_params`, :attr:`~hwms_query_params`, and - :attr:`~sites_query_params` for available values. - - Also, see the API documentation for each data type for more information: - - `instruments `__ - - `peaks `__ - - `hwms `__ - - `sites `__ - * **as_list** (:class:`bool`, *optional*) -- If True, return the data as a list, defaults to False. - * **crs** (:class:`int`, :class:`str`, or :class:`pyproj.CRS`, *optional*) -- Desired Coordinate reference system (CRS) of output. - Only used for GeoDataFrames outputs. - * **async_retriever_kwargs** (:class:`dict`, *optional*) -- Additional keyword arguments to pass to - ``async_retriever.retrieve_json()``. The ``url`` and ``request_kwds`` - options are already set. - - :returns: :class:`geopandas.GeoDataFrame` or :class:`pandas.DataFrame` or :class:`list` of :class:`dict` -- The retrieved data as a GeoDataFrame, DataFrame, or a - list of dictionaries. 
- - :raises InputValueError: If the input data_type is not one of - ``instruments``, ``peaks``, ``hwms``, or ``sites`` - :raises InputValueError: If any of the input query_params are not in accepted - parameters (See :attr:`~instruments_query_params`, - :attr:`~peaks_query_params`, :attr:`~hwms_query_params`, - or :attr:`~sites_query_params`). - - .. seealso:: - - :meth:`~get_all_data` - Retrieves all data for a given data type. - - :meth:`~data_dictionary` - Retrieves the data dictionary for a given data type. - - .. rubric:: Notes - - Notice schema differences between the data dictionaries, - filtered data queries, and all data queries. This is a known - issue and is being addressed by USGS. - - .. rubric:: Examples - - >>> from pygeohydro.stnfloodevents import STNFloodEventData - >>> query_params = {"States": "SC, CA"} - >>> data = STNFloodEventData.get_filtered_data(data_type="instruments", query_params=query_params) - >>> data.shape[1] - 34 - >>> data.columns - Index(['sensorType', 'deploymentType', 'eventName', 'collectionCondition', - 'housingType', 'sensorBrand', 'statusId', 'timeStamp', 'site_no', - 'latitude', 'longitude', 'siteDescription', 'networkNames', 'stateName', - 'countyName', 'siteWaterbody', 'siteHDatum', 'sitePriorityName', - 'siteZone', 'siteHCollectMethod', 'sitePermHousing', 'instrument_id', - 'sensor_type_id', 'deployment_type_id', 'location_description', - 'serial_number', 'housing_serial_number', 'interval', 'site_id', - 'vented', 'instrument_status', 'data_files', 'files', 'geometry'], - dtype='object') - - - -.. py:function:: stn_flood_event(data_type, query_params = None) - - Retrieve data from the STN Flood Event Data API. - - :Parameters: * **data_type** (:class:`str`) -- The data source from STN Flood Event Data API. - It can be ``instruments``, ``peaks``, ``hwms``, or ``sites``. 
- * **query_params** (:class:`dict`, *optional*) -- RESTFUL API query parameters, defaults to ``None`` which returns - a ``pandas.DataFrame`` of information about the given ``data_type``. - For accepted values, see the ``STNFloodEventData`` class attributes - :attr:`~.STNFloodEventData.instruments_query_params`, - :attr:`~.STNFloodEventData.peaks_query_params`, - :attr:`~.STNFloodEventData.hwms_query_params`, and - :attr:`~.STNFloodEventData.sites_query_params` for available values. - - Also, see the API documentation for each data type for more information: - - - `instruments `__ - - `peaks `__ - - `hwms `__ - - `sites `__ - - :returns: :class:`geopandas.GeoDataFrame` or :class:`pandas.DataFrame` -- The retrieved data as a GeoDataFrame or DataFrame - (if ``query_params`` is not passed). - - :raises InputValueError: If the input data_type is not one of - ``instruments``, ``peaks``, ``hwms``, or ``sites`` - :raises InputValueError: If any of the input query_params are not in accepted - parameters. - - .. rubric:: References - - * `USGS Short-Term Network (STN) `__ - * `Filtered Sensors API Documentation `__ - * `Peak Summary API Documentation `__ - * `Filtered HWM API Documentation `__ - * `Filtered Sites API Documentation `__ - * `USGS Flood Event Viewer: Providing Hurricane and Flood Response Data `__ - * `A USGS guide for finding and interpreting high-water marks `__ - * `High-Water Marks and Flooding `__ - * `Identifying and preserving high-water mark data `__ - - .. rubric:: Notes - - Notice schema differences between the data dictionaries, - filtered data queries, and all data queries. This is a known - issue and is being addressed by USGS. - - .. 
rubric:: Examples - - >>> query_params = {"States": "SC, CA"} - >>> data = stn_flood_event("instruments", query_params=query_params) - >>> data.shape[1] - 34 - >>> data.columns - Index(['sensorType', 'deploymentType', 'eventName', 'collectionCondition', - 'housingType', 'sensorBrand', 'statusId', 'timeStamp', 'site_no', - 'latitude', 'longitude', 'siteDescription', 'networkNames', 'stateName', - 'countyName', 'siteWaterbody', 'siteHDatum', 'sitePriorityName', - 'siteZone', 'siteHCollectMethod', 'sitePermHousing', 'instrument_id', - 'sensor_type_id', 'deployment_type_id', 'location_description', - 'serial_number', 'housing_serial_number', 'interval', 'site_id', - 'vented', 'instrument_status', 'data_files', 'files', 'geometry'], - dtype='object') - - diff --git a/docs/source/autoapi/pygeohydro/us_abbrs/index.rst b/docs/source/autoapi/pygeohydro/us_abbrs/index.rst deleted file mode 100644 index bb8f6a8..0000000 --- a/docs/source/autoapi/pygeohydro/us_abbrs/index.rst +++ /dev/null @@ -1,11 +0,0 @@ -pygeohydro.us_abbrs -=================== - -.. py:module:: pygeohydro.us_abbrs - -.. autoapi-nested-parse:: - - US states and territories Abbreviations from ``us`` package. - - - diff --git a/docs/source/autoapi/pygeohydro/waterdata/index.rst b/docs/source/autoapi/pygeohydro/waterdata/index.rst deleted file mode 100644 index 28e7f8a..0000000 --- a/docs/source/autoapi/pygeohydro/waterdata/index.rst +++ /dev/null @@ -1,167 +0,0 @@ -pygeohydro.waterdata -==================== - -.. py:module:: pygeohydro.waterdata - -.. autoapi-nested-parse:: - - Accessing WaterData related APIs. - - - - - -Module Contents ---------------- - -.. py:class:: SensorThings - - Class for interacting with SensorThings API. - - - .. py:method:: odata_helper(columns = None, conditionals = None, expand = None, max_count = None, extra_params = None) - :staticmethod: - - - Generate Odata filters for SensorThings API. 
- - :Parameters: * **columns** (:class:`list` of :class:`str`, *optional*) -- Columns to be selected from the database, defaults to ``None``. - * **conditionals** (:class:`str`, *optional*) -- Conditionals to be applied to the database, defaults to ``None``. - Note that the conditionals should have the form of - ``cond1 operator 'value' and/or cond2 operator 'value'``. - For example: - ``properties/monitoringLocationType eq 'Stream' and ...`` - * **expand** (:class:`dict` of :class:`dict`, *optional*) -- Expand the properties of the selected columns, defaults to ``None``. - Note that the ``expand`` should have the form of - ``{Property: {func: value, ...}}``. For example: ``{"Locations": - {"select": "location", "filter": "ObservedProperty/@iot.id eq '00060'"}}`` - * **max_count** (:class:`int`, *optional*) -- Maximum number of items to be returned, defaults to ``None``. - * **extra_params** (:class:`dict`, *optional*) -- Extra parameters to be added to the Odata filter, defaults to ``None``. - - :returns: **odata** (:class:`dict`) -- Odata filter for the SensorThings API. - - - - .. py:method:: query_byodata(odata, outformat = 'json') - - Query the SensorThings API by Odata filter. - - :Parameters: * **odata** (:class:`str`) -- Odata filter for the SensorThings API. - * **outformat** (:class:`str`, *optional*) -- Format of the response, defaults to ``json``. - Valid values are ``json`` and ``geojson``. - - :returns: :class:`pandas.DataFrame` or :class:`geopandas.GeoDataFrame` -- Requested data. - - - - .. py:method:: sensor_info(sensor_ids) - - Query the SensorThings API by a sensor ID. - - :Parameters: **sensor_ids** (:class:`str` or :class:`list` of :class:`str`) -- A single or list of sensor IDs, e.g., ``USGS-09380000``. - - :returns: :class:`pandas.DataFrame` -- Requested sensor data. - - - - .. py:method:: sensor_property(sensor_property, sensor_ids) - - Query a sensor property. 
- - :Parameters: * **sensor_property** (:class:`str` or :class:`list` of :class:`str`) -- A sensor property, Valid properties are ``Datastreams``, - ``MultiDatastreams``, ``Locations``, ``HistoricalLocations``, - ``TaskingCapabilities``. - * **sensor_ids** (:class:`str` or :class:`list` of :class:`str`) -- A single or list of sensor IDs, e.g., ``USGS-09380000``. - - :returns: :class:`pandas.DataFrame` -- A dataframe containing the requested property. - - - -.. py:class:: WaterQuality - - Water Quality Web Service https://www.waterqualitydata.us. - - .. rubric:: Notes - - This class has a number of convenience methods to retrieve data from the - Water Quality Data. Since there are many parameter combinations that can be - used to retrieve data, a general method is also provided to retrieve data from - any of the valid endpoints. You can use ``get_json`` to retrieve stations info - as a ``geopandas.GeoDataFrame`` or ``get_csv`` to retrieve stations data as a - ``pandas.DataFrame``. You can construct a dictionary of the parameters and pass - it to one of these functions. For more information on the parameters, please - consult the - `Water Quality Data documentation `__. - - - .. py:method:: data_bystation(station_ids, wq_kwds) - - Retrieve data for a single station. - - :Parameters: * **station_ids** (:class:`str` or :class:`list` of :class:`str`) -- Station ID(s). The IDs should have the format "Agency code-Station ID". - * **wq_kwds** (:class:`dict`, *optional*) -- Water Quality Web Service keyword arguments. Default to None. - - :returns: :class:`pandas.DataFrame` -- DataFrame of data for the stations. - - - - .. py:method:: get_csv(endpoint, kwds, request_method = 'GET') - - Get the CSV response from the Water Quality Web Service. - - :Parameters: * **endpoint** (:class:`str`) -- Endpoint of the Water Quality Web Service. - * **kwds** (:class:`dict`) -- Water Quality Web Service keyword arguments. 
- * **request_method** (:class:`str`, *optional*) -- HTTP request method. Default to GET. - - :returns: :class:`pandas.DataFrame` -- The web service response as a DataFrame. - - - - .. py:method:: get_json(endpoint, kwds, request_method = 'GET') - - Get the JSON response from the Water Quality Web Service. - - :Parameters: * **endpoint** (:class:`str`) -- Endpoint of the Water Quality Web Service. - * **kwds** (:class:`dict`) -- Water Quality Web Service keyword arguments. - * **request_method** (:class:`str`, *optional*) -- HTTP request method. Default to GET. - - :returns: :class:`geopandas.GeoDataFrame` -- The web service response as a GeoDataFrame. - - - - .. py:method:: get_param_table() - - Get the parameter table from the USGS Water Quality Web Service. - - - - .. py:method:: lookup_domain_values(endpoint) - - Get the domain values for the target endpoint. - - - - .. py:method:: station_bybbox(bbox, wq_kwds) - - Retrieve station info within bounding box. - - :Parameters: * **bbox** (:class:`tuple` of :class:`float`) -- Bounding box coordinates (west, south, east, north) in epsg:4326. - * **wq_kwds** (:class:`dict`, *optional*) -- Water Quality Web Service keyword arguments. Default to None. - - :returns: :class:`geopandas.GeoDataFrame` -- GeoDataFrame of station info within the bounding box. - - - - .. py:method:: station_bydistance(lon, lat, radius, wq_kwds) - - Retrieve station within a radius (decimal miles) of a point. - - :Parameters: * **lon** (:class:`float`) -- Longitude of point. - * **lat** (:class:`float`) -- Latitude of point. - * **radius** (:class:`float`) -- Radius (decimal miles) of search. - * **wq_kwds** (:class:`dict`, *optional*) -- Water Quality Web Service keyword arguments. Default to None. - - :returns: :class:`geopandas.GeoDataFrame` -- GeoDataFrame of station info within the radius of the point. 
- - - diff --git a/docs/source/autoapi/pygeohydro/watershed/index.rst b/docs/source/autoapi/pygeohydro/watershed/index.rst deleted file mode 100644 index 9103408..0000000 --- a/docs/source/autoapi/pygeohydro/watershed/index.rst +++ /dev/null @@ -1,70 +0,0 @@ -pygeohydro.watershed -==================== - -.. py:module:: pygeohydro.watershed - -.. autoapi-nested-parse:: - - Accessing watershed boundary-level data through web services. - - - - - - - -Module Contents ---------------- - -.. py:class:: WBD(layer, outfields = '*', crs = 4326) - - - - Access Watershed Boundary Dataset (WBD). - - .. rubric:: Notes - - This web service offers Hydrologic Unit (HU) polygon boundaries for - the United States, Puerto Rico, and the U.S. Virgin Islands. - For more info visit: https://hydro.nationalmap.gov/arcgis/rest/services/wbd/MapServer - - :Parameters: * **layer** (:class:`str`, *optional*) -- A valid service layer. Valid layers are: - - - ``wbdline`` - - ``huc2`` - - ``huc4`` - - ``huc6`` - - ``huc8`` - - ``huc10`` - - ``huc12`` - - ``huc14`` - - ``huc16`` - * **outfields** (:class:`str` or :class:`list`, *optional*) -- Target field name(s), default to "*" i.e., all the fields. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- Target spatial reference, default to ``EPSG:4326``. - - -.. py:function:: huc_wb_full(huc_lvl) - - Get the full watershed boundary for a given HUC level. - - .. rubric:: Notes - - This function is designed for cases where the full watershed boundary is needed - for a given HUC level. If only a subset of the HUCs is needed, then use - the ``pygeohydro.WBD`` class. The full dataset is downloaded from the National Maps' - `WBD staged products `__. - - :Parameters: **huc_lvl** (:class:`int`) -- HUC level, must be even numbers between 2 and 16. - - :returns: :class:`geopandas.GeoDataFrame` -- The full watershed boundary for the given HUC level. - - -.. 
py:function:: irrigation_withdrawals() - - Get monthly water use for irrigation at HUC12-level for CONUS. - - .. rubric:: Notes - - Dataset is retrieved from https://doi.org/10.5066/P9FDLY8P. - - diff --git a/docs/source/autoapi/pygeoogc/cache_keys/index.rst b/docs/source/autoapi/pygeoogc/cache_keys/index.rst deleted file mode 100644 index d01e8cb..0000000 --- a/docs/source/autoapi/pygeoogc/cache_keys/index.rst +++ /dev/null @@ -1,32 +0,0 @@ -pygeoogc.cache_keys -=================== - -.. py:module:: pygeoogc.cache_keys - -.. autoapi-nested-parse:: - - Functions for creating unique keys based on web request parameters. - - This module is based on the ``aiohttp-client-cache`` package, which is - licensed under the MIT license. See the ``LICENSE`` file for more details. - - - - - -Module Contents ---------------- - -.. py:function:: create_request_key(method, url, params = None, data = None, json = None) - - Create a unique cache key based on request details. - - :Parameters: * **method** (:class:`str`) -- The HTTP method used in the request. Must be either "GET" or "POST". - * **url** (:class:`str` or :class:`yarl.URL`) -- The URL of the request. - * **params** (:class:`dict` or :class:`list` or :class:`str` or :obj:`None`, *optional*) -- The query parameters of the request. Default is None. - * **data** (:class:`dict` or :obj:`None`, *optional*) -- The data of the request. Default is None. - * **json** (:class:`dict` or :obj:`None`, *optional*) -- The JSON data of the request. Default is None. - - :returns: :class:`str` -- The unique cache key based on the request details. - - diff --git a/docs/source/autoapi/pygeoogc/core/index.rst b/docs/source/autoapi/pygeoogc/core/index.rst deleted file mode 100644 index f453c87..0000000 --- a/docs/source/autoapi/pygeoogc/core/index.rst +++ /dev/null @@ -1,154 +0,0 @@ -pygeoogc.core -============= - -.. py:module:: pygeoogc.core - -.. autoapi-nested-parse:: - - Base classes and function for REST, WMS, and WMF services. 
- - - - - Module Contents --------------- -.. py:class:: ArcGISRESTfulBase(base_url, layer = None, outformat = 'geojson', outfields = '*', crs = 4326, verbose = False, disable_retry = False) - - Access to an ArcGIS REST service. - - :Parameters: * **base_url** (:class:`str`, *optional*) -- The ArcGIS RESTful service url. The URL must either include a layer number - after the last ``/`` in the url or the target layer must be passed as an - argument. - * **layer** (:class:`int`, *optional*) -- Target layer number, defaults to None. If None layer number must be - included after the last ``/`` in ``base_url``. - * **outformat** (:class:`str`, *optional*) -- One of the output formats offered by the selected layer. If not correct - a list of available formats is shown, defaults to ``geojson``. - * **outfields** (:class:`str` or :class:`list`) -- The output fields to be requested. Setting ``*`` as outfields requests - all the available fields which is the default setting. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The spatial reference of the output data, defaults to ``epsg:4326`` - * **verbose** (:class:`bool`, *optional*) -- If True, prints information about the requests and responses, - defaults to False. - * **disable_retry** (:class:`bool`, *optional*) -- If ``True`` in case there are any failed queries, no retrying attempts - is done and object IDs of the failed requests are saved to a text file - whose path can be accessed via ``self.failed_path``. - - - .. py:method:: get_features(featureids, return_m = False, return_geom = True) - - Get features based on the feature IDs. - - :Parameters: * **featureids** (:class:`list`) -- List of feature IDs. - * **return_m** (:class:`bool`, *optional*) -- Whether to activate the Return M (measure) in the request, - defaults to ``False``. 
- * **return_geom** (:class:`bool`, *optional*) -- Whether to return the geometry of the feature, defaults to ``True``. - - :returns: :class:`dict` -- (Geo)json response from the web service. - - - - .. py:method:: get_response(url, payloads, method = 'GET') - - Send payload and get the response. - - - - .. py:method:: initialize_service() - - Initialize the RESTFul service. - - - - .. py:method:: partition_oids(oids) - - Partition feature IDs based on ``self.max_nrecords``. - - - - .. py:method:: retry_failed_requests() - - Retry failed requests. - - - -.. py:class:: WFSBase(url, layer = None, outformat = None, version = '2.0.0', crs = 4326, read_method = 'json', max_nrecords = 1000, validation = True) - - Base class for WFS service. - - :Parameters: * **url** (:class:`str`) -- The base url for the WFS service, for examples: - https://hazards.fema.gov/arcgis/rest/services/public/NFHL/MapServer/WFSServer - * **layer** (:class:`str`) -- The layer from the service to be downloaded, defaults to None which throws - an error and includes all the available layers offered by the service. - * **outformat** (:class:`str`) -- - - The data format to request for data from the service, defaults to None which - throws an error and includes all the available format offered by the service. - * **version** (:class:`str`, *optional*) -- The WFS service version which should be either ``1.0.0``, ``1.1.0``, or - ``2.0.0``. Defaults to ``2.0.0``. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The spatial reference system to be used for requesting the data, defaults to - ``epsg:4326``. - * **read_method** (:class:`str`, *optional*) -- Method for reading the retrieved data, defaults to ``json``. Valid options are - ``json``, ``binary``, and ``text``. - * **max_nrecords** (:class:`int`, *optional*) -- The maximum number of records in a single request to be retrieved from the service, - defaults to 1000. 
If the number of requested records is greater than this value, - the query will be split into multiple requests. - * **validation** (:class:`bool`, *optional*) -- Validate the input arguments from the WFS service, defaults to True. Set this - to False if you are sure all the WFS settings such as layer and crs are correct - to avoid sending extra requests. - - - .. py:method:: get_service_options() - - Validate input arguments with the WFS service. - - - - .. py:method:: sort_params(sort_attr, nfeatures, start_index) - - Get sort parameters for a WFS request. - - - - .. py:method:: validate_wfs() - - Validate input arguments with the WFS service. - - - -.. py:class:: WMSBase(url, layers = '', outformat = '', version = '1.3.0', crs = 4326, validation = True) - - Base class for accessing a WMS service. - - :Parameters: * **url** (:class:`str`) -- The base url for the WMS service e.g., https://www.mrlc.gov/geoserver/mrlc_download/wms - * **layers** (:class:`str` or :class:`list`, *optional*) -- A layer or a list of layers from the service to be downloaded. You can pass an empty - string to get a list of available layers. - * **outformat** (:class:`str`, *optional*) -- The data format to request for data from the service. You can pass an empty - string to get a list of available output formats. - * **version** (:class:`str`, *optional*) -- The WMS service version which should be either 1.1.1 or 1.3.0, defaults to 1.3.0. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The spatial reference system to be used for requesting the data, defaults to - ``epsg:4326``. - * **validation** (:class:`bool`, *optional*) -- Validate the input arguments from the WMS service, defaults to True. Set this - to False if you are sure all the WMS settings such as layer and crs are correct - to avoid sending extra requests. - - - .. py:method:: get_service_options() - - Validate input arguments with the WMS service. - - - - .. 
py:method:: get_validlayers() - - Get the layers supported by the WMS service. - - - - .. py:method:: validate_wms() - - Validate input arguments with the WMS service. - - - diff --git a/docs/source/autoapi/pygeoogc/index.rst b/docs/source/autoapi/pygeoogc/index.rst deleted file mode 100644 index 8703cdc..0000000 --- a/docs/source/autoapi/pygeoogc/index.rst +++ /dev/null @@ -1,23 +0,0 @@ -pygeoogc -======== - -.. py:module:: pygeoogc - -.. autoapi-nested-parse:: - - Top-level package for PyGeoOGC. - - - -Submodules ----------- - -.. toctree:: - :maxdepth: 1 - - /autoapi/pygeoogc/cache_keys/index - /autoapi/pygeoogc/core/index - /autoapi/pygeoogc/pygeoogc/index - /autoapi/pygeoogc/utils/index - - diff --git a/docs/source/autoapi/pygeoogc/pygeoogc/index.rst b/docs/source/autoapi/pygeoogc/pygeoogc/index.rst deleted file mode 100644 index 4d1e2f2..0000000 --- a/docs/source/autoapi/pygeoogc/pygeoogc/index.rst +++ /dev/null @@ -1,304 +0,0 @@ -pygeoogc.pygeoogc -================= - -.. py:module:: pygeoogc.pygeoogc - -.. autoapi-nested-parse:: - - Base classes and function for REST, WMS, and WMF services. - - - - - -Module Contents ---------------- - -.. py:class:: ArcGISRESTful(base_url, layer = None, outformat = 'geojson', outfields = '*', crs = 4326, verbose = False, disable_retry = False) - - Access to an ArcGIS REST service. - - .. rubric:: Notes - - By default, all retrieval methods retry to get the missing feature IDs, - if there are any. You can disable this behavior by setting ``disable_retry`` - to ``True``. If there are any missing feature IDs after the retry, - they are saved to a text file, ipath of which can be accessed by - ``self.client.failed_path``. - - :Parameters: * **base_url** (:class:`str`, *optional*) -- The ArcGIS RESTful service url. The URL must either include a layer number - after the last ``/`` in the url or the target layer must be passed as an argument. - * **layer** (:class:`int`, *optional*) -- Target layer number, defaults to None. 
If None layer number must be included after - the last ``/`` in ``base_url``. - * **outformat** (:class:`str`, *optional*) -- One of the output formats offered by the selected layer. If not correct - a list of available formats is shown, defaults to ``geojson``. - * **outfields** (:class:`str` or :class:`list`) -- The output fields to be requested. Setting ``*`` as outfields requests - all the available fields which is the default behaviour. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The spatial reference of the output data, defaults to ``epsg:4326``. - * **verbose** (:class:`bool`, *optional*) -- If True, prints information about the requests and responses, - defaults to False. - * **disable_retry** (:class:`bool`, *optional*) -- If ``True`` in case there are any failed queries, no retrying attempts - are made and object IDs of the failed requests are saved to a text file, - the path of which can be accessed via ``self.client.failed_path``. - - - .. py:method:: get_features(featureids, return_m = False, return_geom = True) - - Get features based on the feature IDs. - - :Parameters: * **featureids** (:class:`list`) -- List of feature IDs. - * **return_m** (:class:`bool`, *optional*) -- Whether to activate the Return M (measure) in the request, - defaults to ``False``. - * **return_geom** (:class:`bool`, *optional*) -- Whether to return the geometry of the feature, defaults to ``True``. - - :returns: :class:`dict` -- (Geo)json response from the web service. - - - - .. py:method:: oids_byfield(field, ids) - - Get Object IDs based on a list of field IDs. - - :Parameters: * **field** (:class:`str`) -- Name of the target field that IDs belong to. - * **ids** (:class:`str` or :class:`list`) -- A list of target ID(s). - - :returns: :class:`list` of :class:`tuples` -- A list of feature IDs partitioned by ``self.max_nrecords``. - - - - .. 
py:method:: oids_bygeom(geom, geo_crs = 4326, spatial_relation = 'esriSpatialRelIntersects', sql_clause = None, distance = None) - - Get feature IDs within a geometry that can be combined with a SQL where clause. - - :Parameters: * **geom** (:class:`LineString`, :class:`Polygon`, :class:`Point`, :class:`MultiPoint`, :class:`tuple`, or :class:`list` of :class:`tuples`) -- A geometry (LineString, Polygon, Point, MultiPoint), tuple of length two - (``(x, y)``), a list of tuples of length 2 (``[(x, y), ...]``), or bounding box - (tuple of length 4 (``(xmin, ymin, xmax, ymax)``)). - * **geo_crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The spatial reference of the input geometry, defaults to ``epsg:4326``. - * **spatial_relation** (:class:`str`, *optional*) -- The spatial relationship to be applied on the input geometry - while performing the query. If not correct a list of available options is shown. - It defaults to ``esriSpatialRelIntersects``. Valid predicates are: - - * ``esriSpatialRelIntersects`` - * ``esriSpatialRelContains`` - * ``esriSpatialRelCrosses`` - * ``esriSpatialRelEnvelopeIntersects`` - * ``esriSpatialRelIndexIntersects`` - * ``esriSpatialRelOverlaps`` - * ``esriSpatialRelTouches`` - * ``esriSpatialRelWithin`` - * ``esriSpatialRelRelation`` - * **sql_clause** (:class:`str`, *optional*) -- Valid SQL 92 WHERE clause, default to None. - * **distance** (:class:`int`, *optional*) -- Buffer distance in meters for the input geometries, default to None. - - :returns: :class:`list` of :class:`tuples` -- A list of feature IDs partitioned by ``self.max_nrecords``. - - - - .. py:method:: oids_bysql(sql_clause) - - Get feature IDs using a valid SQL 92 WHERE clause. - - .. rubric:: Notes - - Not all web services support this type of query. For more details look - `here `__. - - :Parameters: **sql_clause** (:class:`str`) -- A valid SQL 92 WHERE clause. 
- - :returns: :class:`list` of :class:`tuples` -- A list of feature IDs partitioned by ``self.max_nrecords``. - - - - .. py:method:: partition_oids(oids) - - Partition feature IDs based on ``self.max_nrecords``. - - :Parameters: **oids** (:class:`list` of :class:`int` or :class:`int`) -- A list of feature ID(s). - - :returns: :class:`list` of :class:`tuples` -- A list of feature IDs partitioned by ``self.max_nrecords``. - - - -.. py:class:: HttpURLs - - URLs of the supported HTTP services. - - -.. py:class:: RESTfulURLs - - URLs of the supported RESTful services. - - -.. py:class:: ServiceURL - - URLs of the supported services. - - -.. py:class:: WFS(url, layer = None, outformat = None, version = '2.0.0', crs = 4326, read_method = 'json', max_nrecords = 1000, validation = True) - - - - Data from any WFS service within a geometry or by featureid. - - :Parameters: * **url** (:class:`str`) -- The base url for the WFS service, for examples: - https://hazards.fema.gov/nfhl/services/public/NFHL/MapServer/WFSServer - * **layer** (:class:`str`) -- The layer from the service to be downloaded, defaults to None which throws - an error and includes all the available layers offered by the service. - * **outformat** (:class:`str`) -- - - The data format to request for data from the service, defaults to None which - throws an error and includes all the available format offered by the service. - * **version** (:class:`str`, *optional*) -- The WFS service version which should be either 1.0.0, 1.1.0, or 2.0.0. - Defaults to 2.0.0. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The spatial reference system to be used for requesting the data, defaults to - ``epsg:4326``. - * **read_method** (:class:`str`, *optional*) -- Method for reading the retrieved data, defaults to ``json``. Valid options are - ``json``, ``binary``, and ``text``. 
- * **max_nrecords** (:class:`int`, *optional*) -- The maximum number of records in a single request to be retrieved from the service, - defaults to 1000. If the number of records requested is greater than this value, - it will be split into multiple requests. - * **validation** (:class:`bool`, *optional*) -- Validate the input arguments from the WFS service, defaults to True. Set this - to False if you are sure all the WFS settings such as layer and crs are correct - to avoid sending extra requests. - - - .. py:method:: getfeature_bybox(bbox, box_crs = 4326, always_xy = False, sort_attr = None) - - Get data from a WFS service within a bounding box. - - :Parameters: * **bbox** (:class:`tuple`) -- A bounding box for getting the data: [west, south, east, north] - * **box_crs** (:class:`str`, or :class:`pyproj.CRS`, *optional*) -- The spatial reference system of the input bbox, defaults to - ``epsg:4326``. - * **always_xy** (:class:`bool`, *optional*) -- Whether to always use xy axis order, defaults to False. Some services change the axis - order from xy to yx, following the latest WFS version specifications but some don't. - If the returned value does not have any geometry, it indicates that most probably the - axis order does not match. You can set this to True in that case. - * **sort_attr** (:class:`str`, *optional*) -- The column name in the database to sort request by, defaults - to the first attribute in the schema that contains ``id`` in its name. - - :returns: :class:`list` of :class:`str` or :class:`bytes` or :class:`dict` -- WFS query response within a bounding box. - - - - .. py:method:: getfeature_byfilter(cql_filter, method = 'GET', sort_attr = None) - - Get features based on a valid CQL filter. - - .. rubric:: Notes - - The validity of the input CQL expression is user's responsibility since - the function does not perform any checks and just sends a request using - the input filter. 
- - :Parameters: * **cql_filter** (:class:`str`) -- A valid CQL filter expression. - * **method** (:class:`str`) -- The request method, could be GET or POST (for long filters). - * **sort_attr** (:class:`str`, *optional*) -- The column name in the database to sort request by, defaults - to the first attribute in the schema that contains ``id`` in its name. - - :returns: :class:`str` or :class:`bytes` or :class:`dict` -- WFS query response - - - - .. py:method:: getfeature_bygeom(geometry, geo_crs = 4326, always_xy = False, predicate = 'INTERSECTS', sort_attr = None) - - Get features based on a geometry. - - :Parameters: * **geometry** (:class:`shapely.Polygon` or :class:`shapely.MultiPolygon`) -- The input geometry - * **geo_crs** (:class:`str`, or :class:`pyproj.CRS`, *optional*) -- The CRS of the input geometry, default to ``epsg:4326``. - * **always_xy** (:class:`bool`, *optional*) -- Whether to always use xy axis order, defaults to False. Some services change the axis - order from xy to yx, following the latest WFS version specifications but some don't. - If the returned value does not have any geometry, it indicates that most probably the - axis order does not match. You can set this to True in that case. - * **predicate** (:class:`str`, *optional*) -- The geometric predicate to use for requesting the data, defaults to ``INTERSECTS``. - Valid predicates are: - - * ``EQUALS`` - * ``DISJOINT`` - * ``INTERSECTS`` - * ``TOUCHES`` - * ``CROSSES`` - * ``WITHIN`` - * ``CONTAINS`` - * ``OVERLAPS`` - * ``RELATE`` - * ``BEYOND`` - * **sort_attr** (:class:`str`, *optional*) -- The column name in the database to sort request by, defaults - to the first attribute in the schema that contains ``id`` in its name. - - :returns: :class:`str` or :class:`bytes` or :class:`dict` -- WFS query response based on the given geometry. - - - - .. py:method:: getfeature_byid(featurename, featureids) - - Get features based on feature IDs. 
- - :Parameters: * **featurename** (:class:`str`) -- The name of the column for searching for feature IDs. - * **featureids** (:class:`int`, :class:`str`, or :class:`list` of :class:`them`) -- The feature ID(s). - - :returns: :class:`str` or :class:`bytes` or :class:`dict` -- WFS query response. - - - -.. py:class:: WFSURLs - - URLs of the supported WFS services. - - -.. py:class:: WMS(url, layers, outformat, version = '1.3.0', crs = 4326, validation = True, ssl = True) - - Get data from a WMS service within a geometry or bounding box. - - :Parameters: * **url** (:class:`str`) -- The base url for the WMS service e.g., https://www.mrlc.gov/geoserver/mrlc_download/wms - * **layers** (:class:`str` or :class:`list`) -- A layer or a list of layers from the service to be downloaded. You can pass an empty - string to get a list of available layers. - * **outformat** (:class:`str`) -- The data format to request for data from the service. You can pass an empty - string to get a list of available output formats. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The spatial reference system to be used for requesting the data, defaults to - ``epsg:4326``. - * **version** (:class:`str`, *optional*) -- The WMS service version which should be either 1.1.1 or 1.3.0, defaults to 1.3.0. - * **validation** (:class:`bool`, *optional*) -- Validate the input arguments from the WMS service, defaults to True. Set this - to False if you are sure all the WMS settings such as layer and crs are correct - to avoid sending extra requests. - * **ssl** (:class:`bool`, *optional*) -- Whether to use SSL for the connection, defaults to ``True``. - - - .. py:method:: get_validlayers() - - Get the layers supported by the WMS service. - - - - .. 
py:method:: getmap_bybox(bbox: tuple[float, float, float, float], resolution: float, box_crs: CRSType = ..., always_xy: bool = ..., max_px: int = ..., kwargs: dict[str, Any] | None = ..., tiff_dir: Literal[None] = None) -> dict[str, bytes] - getmap_bybox(bbox: tuple[float, float, float, float], resolution: float, box_crs: CRSType = ..., always_xy: bool = ..., max_px: int = ..., kwargs: dict[str, Any] | None = ..., tiff_dir: str | pathlib.Path = ...) -> list[pathlib.Path] - - Get data from a WMS service within a geometry or bounding box. - - :Parameters: * **bbox** (:class:`tuple`) -- A bounding box for getting the data. - * **resolution** (:class:`float`) -- The output resolution in meters. The width and height of output are computed in pixel - based on the geometry bounds and the given resolution. - * **box_crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The spatial reference system of the input bbox, defaults to - ``epsg:4326``. - * **always_xy** (:class:`bool`, *optional*) -- Whether to always use xy axis order, defaults to False. Some services change the axis - order from xy to yx, following the latest WFS version specifications but some don't. - If the returned value does not have any geometry, it indicates that most probably the - axis order does not match. You can set this to True in that case. - * **max_px** (:class:`int`, *optional*) -- The maximum allowable number of pixels (width x height) for a WMS requests, - defaults to 8 million based on some trial-and-error. - * **kwargs** (:class:`dict`, *optional*) -- Optional additional keywords passed as payload, defaults to None. - For example, ``{"styles": "default"}``. - * **tiff_dir** (:class:`str` or :class:`pathlib.Path`, *optional*) -- If given, the retrieved data will be stored on disk instead of - returning it, defaults to ``None``, i.e., saving to memory - and returning the data. 
- - :returns: :class:`dict` of :class:`bytes` or :class:`list` of :class:`pathlib.Path` -- If ``tiff_dir`` is ``None``, a dict where the keys are the layer name and - values are the returned response from the WMS service as bytes. - If ``tiff_dir`` is given, a list of pathlib.Path objects to the saved files. - - - -.. py:class:: WMSURLs - - URLs of the supported WMS services. - - diff --git a/docs/source/autoapi/pygeoogc/utils/index.rst b/docs/source/autoapi/pygeoogc/utils/index.rst deleted file mode 100644 index d7aad6a..0000000 --- a/docs/source/autoapi/pygeoogc/utils/index.rst +++ /dev/null @@ -1,166 +0,0 @@ -pygeoogc.utils -============== - -.. py:module:: pygeoogc.utils - -.. autoapi-nested-parse:: - - Some utilities for PyGeoOGC. - - - - - - - -Module Contents ---------------- - -.. py:class:: RetrySession(retries = 3, backoff_factor = 0.3, status_to_retry = (500, 502, 504), prefixes = ('https://', ), cache_name = None, expire_after = EXPIRE_AFTER, disable = False, ssl = True) - - Configures the passed-in session to retry on failed requests. - - .. rubric:: Notes - - The fails can be due to connection errors, specific HTTP response - codes and 30X redirections. The code was originally based on: - https://github.com/bustawin/retry-requests - - :Parameters: * **retries** (:class:`int`, *optional*) -- The number of maximum retries before raising an exception, defaults to 3. - * **backoff_factor** (:class:`float`, *optional*) -- A factor used to compute the waiting time between retries, defaults to 0.3. - * **status_to_retry** (:class:`tuple`, *optional*) -- A tuple of status codes that trigger the retry behaviour, defaults to (500, 502, 504). - * **prefixes** (:class:`tuple`, *optional*) -- The prefixes to consider, defaults to ("https://",) - * **cache_name** (:class:`str`, *optional*) -- Path to a folder for caching the session, default to None which uses - system's temp directory. 
- * **expire_after** (:class:`int`, *optional*) -- Expiration time for the cache in seconds, defaults to -1 (never expire). - * **disable** (:class:`bool`, *optional*) -- If ``True`` temporarily disable caching request/responses, defaults to ``False``. - * **ssl** (:class:`bool`, *optional*) -- If ``True`` verify SSL certificates, defaults to ``True``. - - - .. py:method:: close() - - Close the session. - - - - .. py:method:: get(url, payload = None, params = None, headers = None, stream = None) - - Retrieve data from a url by GET and return the Response. - - - - .. py:method:: head(url, params = None, data = None, json = None, headers = None) - - Retrieve data from a url by HEAD and return the Response. - - - - .. py:method:: post(url, payload = None, data = None, json = None, headers = None, stream = None) - - Retrieve data from a url by POST and return the Response. - - - - .. py:property:: disable - :type: bool - - - Disable caching request/responses. - - -.. py:function:: match_crs(geom, in_crs, out_crs) - - Reproject a geometry to another CRS. - - :Parameters: * **geom** (:class:`list` or :class:`tuple` or :class:`geometry`) -- Input geometry which could be a list of coordinates such as ``[(x1, y1), ...]``, - a bounding box like so ``(xmin, ymin, xmax, ymax)``, or any valid ``shapely``'s - geometry such as ``Polygon``, ``MultiPolygon``, etc.. - * **in_crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`) -- Spatial reference of the input geometry - * **out_crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`) -- Target spatial reference - - :returns: :class:`same type as the input geometry` -- Transformed geometry in the target CRS. - - .. 
rubric:: Examples - - >>> from shapely import Point - >>> point = Point(-7766049.665, 5691929.739) - >>> match_crs(point, 3857, 4326).xy - (array('d', [-69.7636111130079]), array('d', [45.44549114818127])) - >>> bbox = (-7766049.665, 5691929.739, -7763049.665, 5696929.739) - >>> match_crs(bbox, 3857, 4326) - (-69.7636111130079, 45.44549114818127, -69.73666165448431, 45.47699468552394) - >>> coords = [(-7766049.665, 5691929.739)] - >>> match_crs(coords, 3857, 4326) - [(-69.7636111130079, 45.44549114818127)] - - -.. py:function:: streaming_download(urls: str, kwds: dict[str, dict[Any, Any]] | None = None, fnames: str | pathlib.Path | None = None, root_dir: str | pathlib.Path | None = None, file_prefix: str = '', file_extention: str = '', method: Literal['GET', 'POST', 'get', 'post'] = 'GET', ssl: bool = True, chunk_size: int = CHUNK_SIZE, n_jobs: int = MAX_CONN) -> pathlib.Path | None - streaming_download(urls: list[str], kwds: list[dict[str, dict[Any, Any]]] | None = None, fnames: collections.abc.Sequence[str | pathlib.Path] | None = None, root_dir: str | pathlib.Path | None = None, file_prefix: str = '', file_extention: str = '', method: Literal['GET', 'POST', 'get', 'post'] = 'GET', ssl: bool = True, chunk_size: int = CHUNK_SIZE, n_jobs: int = MAX_CONN) -> list[pathlib.Path | None] - - Download and store files in parallel from a list of URLs/Keywords. - - .. rubric:: Notes - - This function runs asynchronously in parallel using ``n_jobs`` threads. - - :Parameters: * **urls** (:class:`tuple` or :class:`list`) -- A list of URLs to download. - * **kwds** (:class:`tuple` or :class:`list`, *optional*) -- A list of keywords associated with each URL, e.g., - ({"params": ..., "headers": ...}, ...). Defaults to ``None``. - * **fnames** (:class:`tuple` or :class:`list`, *optional*) -- A list of filenames associated with each URL, e.g., - ("file1.zip", ...). Defaults to ``None``. If not provided, - random unique filenames will be generated based on - URL and keyword pairs. 
- * **root_dir** (:class:`str` or :class:`Path`, *optional*) -- Root directory to store the files, defaults to ``None`` which - uses HyRiver's cache directory. Note that you should either - provide ``root_dir`` or ``fnames``. If both are provided, - ``root_dir`` will be ignored. - * **file_prefix** (:class:`str`, *optional*) -- Prefix to add to filenames when storing the files, defaults - to ``None``, i.e., no prefix. This argument will be only be - used if ``fnames`` is not passed. - * **file_extention** (:class:`str`, *optional*) -- Extension to use for storing the files, defaults to ``None``, - i.e., no extension if ``fnames`` is not provided otherwise. This - argument will be only be used if ``fnames`` is not passed. - * **method** (:class:`str`, *optional*) -- HTTP method to use, i.e, ``GET`` or ``POST``, by default "GET". - * **ssl** (:class:`bool`, *optional*) -- Whether to use SSL verification, defaults to ``True``. - * **chunk_size** (:class:`int`, *optional*) -- Chunk size to use when downloading, defaults to 100 * 1024 * 1024 - i.e., 100 MB. - * **n_jobs** (:class:`int`, *optional*) -- The maximum number of concurrent downloads, defaults to 10. - - :returns: :class:`list` -- A list of ``pathlib.Path`` objects associated with URLs in the - same order. - - -.. py:function:: traverse_json(json_data, ipath) - - Extract an element from a JSON-like object along a specified ipath. - - This function is based on - `bcmullins `__. - - :Parameters: * **json_data** (:class:`dict` or :class:`list` of :class:`dicts`) -- The input json dictionary. - * **ipath** (:class:`list`) -- The ipath to the requested element. - - :returns: :class:`list` -- The sub-items founds in the JSON. - - .. rubric:: Examples - - >>> data = [ - ... {"employees": [ - ... {"name": "Alice", "role": "dev", "nbr": 1}, - ... {"name": "Bob", "role": "dev", "nbr": 2}, - ... ],}, - ... {"firm": {"name": "Charlie's Waffle Emporium", "location": "CA"}}, - ... 
] - >>> traverse_json(data, ["employees", "name"]) - [['Alice', 'Bob'], [None]] - - -.. py:function:: validate_crs(crs) - - Validate a CRS. - - :Parameters: **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`) -- Input CRS. - - :returns: :class:`str` -- Validated CRS as a string. - - diff --git a/docs/source/autoapi/pygeoutils/geotools/index.rst b/docs/source/autoapi/pygeoutils/geotools/index.rst deleted file mode 100644 index 8bbc432..0000000 --- a/docs/source/autoapi/pygeoutils/geotools/index.rst +++ /dev/null @@ -1,181 +0,0 @@ -pygeoutils.geotools -=================== - -.. py:module:: pygeoutils.geotools - -.. autoapi-nested-parse:: - - Some utilities for manipulating GeoSpatial data. - - - - - - - -Module Contents ---------------- - -.. py:class:: Coordinates - - Generate validated and normalized coordinates in WGS84. - - :Parameters: * **lon** (:class:`float` or :class:`list` of :class:`floats`) -- Longitude(s) in decimal degrees. - * **lat** (:class:`float` or :class:`list` of :class:`floats`) -- Latitude(s) in decimal degrees. - * **bounds** (:class:`tuple` of :class:`length 4`, *optional*) -- The bounding box to check of the input coordinates fall within. - Defaults to WGS84 bounds. - - .. rubric:: Examples - - >>> c = Coordinates([460, 20, -30], [80, 200, 10]) - >>> c.points.x.tolist() - [100.0, -30.0] - - - .. py:property:: points - :type: geopandas.GeoSeries - - - Get validate coordinate as a ``geopandas.GeoSeries``. - - -.. py:function:: break_lines(lines, points, tol = 0.0) - - Break lines at specified points at given direction. - - :Parameters: * **lines** (:class:`geopandas.GeoDataFrame`) -- Lines to break at intersection points. - * **points** (:class:`geopandas.GeoDataFrame`) -- Points to break lines at. It must contain a column named ``direction`` - with values ``up`` or ``down``. This column is used to determine which - part of the lines to keep, i.e., upstream or downstream of points. 
- * **tol** (:class:`float`, *optional*) -- Tolerance for snapping points to the nearest lines in meters. - The default is 0.0. - - :returns: :class:`geopandas.GeoDataFrame` -- Original lines except for the parts that have been broken at the specified - points. - - -.. py:function:: coords_list(coords) - - Convert a single coordinate or list of coordinates to a list of coordinates. - - :Parameters: **coords** (:class:`tuple` of :class:`list` of :class:`tuple`) -- Input coordinates - - :returns: :class:`list` of :class:`tuple` -- List of coordinates as ``[(x1, y1), ...]``. - - -.. py:function:: geo2polygon(geometry, geo_crs = None, crs = None) - - Return a Shapely geometry and optionally transform to a new CRS. - - :Parameters: * **geometry** (:class:`shaple.Geometry` or :class:`tuple` of :class:`length 4`) -- Any shapely geometry object or a bounding box (minx, miny, maxx, maxy). - * **geo_crs** (:class:`int`, :class:`str`, or :class:`pyproj.CRS`, *optional*) -- Spatial reference of the input geometry, defaults to ``None``. - * **crs** (:class:`int`, :class:`str`, or :class:`pyproj.CRS`) -- Target spatial reference, defaults to ``None``. - - :returns: :class:`shapely.geometry.base.BaseGeometry` -- A shapely geometry object. - - -.. py:function:: geo_transform(geometry: shapely.geometry.base.BaseGeometry, in_crs: CRSType, out_crs: CRSType, include_z: bool = False) -> shapely.geometry.base.BaseGeometry - geo_transform(geometry: numpy.typing.NDArray[shapely.geometry.base.BaseGeometry], in_crs: CRSType, out_crs: CRSType, include_z: bool = False) -> numpy.typing.NDArray[shapely.geometry.base.BaseGeometry] - - Transform a geometry from one CRS to another. - - :Parameters: * **geometry** (:class:`shapely.geometry.base.BaseGeometry`) -- The geometry or an array of geometries to transform. - * **in_crs** (:class:`int`) -- The CRS of the input geometry. - * **out_crs** (:class:`int`) -- The CRS to which the input geometry will be transformed. 
- * **include_z** (:class:`bool`, *optional*) -- Whether to include the Z coordinate in the transformation, by default False. - - :returns: :class:`shapely.geometry.base.BaseGeometry` or :class:`numpy.ndarray` -- The transformed geometry or an array of transformed geometries. - - -.. py:function:: geometry_list(geometry) - - Convert input geometry to a list of Polygons, Points, or LineStrings. - - :Parameters: **geometry** (:class:`Polygon` or :class:`MultiPolygon` or :class:`tuple` of :class:`length 4` or :class:`list` of :class:`tuples` of :class:`length 2` or ``3``) -- Input geometry could be a ``(Multi)Polygon``, ``(Multi)LineString``, - ``(Multi)Point``, a tuple/list of length 4 (west, south, east, north), - or a list of tuples of length 2 or 3. - - :returns: :class:`list` -- A list of Polygons, Points, or LineStrings. - - -.. py:function:: geometry_reproject(geom, in_crs, out_crs) - - Reproject a geometry to another CRS. - - :Parameters: * **geom** (:class:`list` or :class:`tuple` or :class:`any shapely.GeometryType`) -- Input geometry could be a list of coordinates such as ``[(x1, y1), ...]``, - a bounding box like so ``(xmin, ymin, xmax, ymax)``, or any valid ``shapely``'s - geometry such as ``Polygon``, ``MultiPolygon``, etc.. - * **in_crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`) -- Spatial reference of the input geometry - * **out_crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`) -- Target spatial reference - - :returns: :class:`same type as the input geometry` -- Transformed geometry in the target CRS. - - .. 
rubric:: Examples - - >>> from shapely import Point - >>> point = Point(-7766049.665, 5691929.739) - >>> geometry_reproject(point, 3857, 4326).xy - (array('d', [-69.7636111130079]), array('d', [45.44549114818127])) - >>> bbox = (-7766049.665, 5691929.739, -7763049.665, 5696929.739) - >>> geometry_reproject(bbox, 3857, 4326) - (-69.7636111130079, 45.44549114818127, -69.73666165448431, 45.47699468552394) - >>> coords = [(-7766049.665, 5691929.739)] - >>> geometry_reproject(coords, 3857, 4326) - [(-69.7636111130079, 45.44549114818127)] - - -.. py:function:: multi2poly(gdf) - - Convert multipolygons to polygon and fill holes, if any. - - .. rubric:: Notes - - This function tries to convert multipolygons to polygons by - first checking if multipolygons can be directly converted using - their exterior boundaries. If not, it will try to remove very small - sub-polygons whose area is less than 1% of the total area - of the multipolygon. If this fails, the original multipolygon will - be returned. - - :Parameters: **gdf** (:class:`geopandas.GeoDataFrame` or :class:`geopandas.GeoSeries`) -- A GeoDataFrame or GeoSeries with (multi)polygons. This will be - more accurate if the CRS is projected. - - :returns: :class:`geopandas.GeoDataFrame` or :class:`geopandas.GeoSeries` -- A GeoDataFrame or GeoSeries with polygons (and multipolygons). - - -.. py:function:: nested_polygons(gdf) - - Get nested polygons in a GeoDataFrame. - - :Parameters: **gdf** (:class:`geopandas.GeoDataFrame` or :class:`geopandas.GeoSeries`) -- A GeoDataFrame or GeoSeries with (multi)polygons. - - :returns: :class:`dict` -- A dictionary where keys are indices of larger polygons and - values are a list of indices of smaller polygons that are - contained within the larger polygons. - - -.. py:function:: query_indices(tree_gdf, input_gdf, predicate = 'intersects') - - Find the indices of the input_geo that intersect with the tree_geo. 
- - :Parameters: * **tree_gdf** (:class:`geopandas.GeoDataFrame` or :class:`geopandas.GeoSeries`) -- The tree geodataframe. - * **input_gdf** (:class:`geopandas.GeoDataFrame` or :class:`geopandas.GeoSeries`) -- The input geodataframe. - * **predicate** (:class:`str`, *optional*) -- The predicate to use for the query operation, defaults to ``intersects``. - - :returns: :class:`dict` -- A dictionary of the indices of the ``input_gdf`` that intersect with the - ``tree_gdf``. Keys are the index of ``input_gdf`` and values are a list - of indices of the intersecting ``tree_gdf``. - - -.. py:function:: snap2nearest(lines_gdf, points_gdf, tol) - - Find the nearest points on a line to a set of points. - - :Parameters: * **lines_gdf** (:class:`geopandas.GeoDataFrame` or :class:`geopandas.GeoSeries`) -- Lines. - * **points_gdf** (:class:`geopandas.GeoDataFrame` or :class:`geopandas.GeoSeries`) -- Points to snap to lines. - * **tol** (:class:`float`, *optional*) -- Tolerance for snapping points to the nearest lines in meters. - It must be greater than 0.0. - - :returns: :class:`geopandas.GeoDataFrame` or :class:`geopandas.GeoSeries` -- Points snapped to lines. - - diff --git a/docs/source/autoapi/pygeoutils/index.rst b/docs/source/autoapi/pygeoutils/index.rst deleted file mode 100644 index fb4f6eb..0000000 --- a/docs/source/autoapi/pygeoutils/index.rst +++ /dev/null @@ -1,22 +0,0 @@ -pygeoutils -========== - -.. py:module:: pygeoutils - -.. autoapi-nested-parse:: - - Top-level package for PyGeoUtils. - - - -Submodules ----------- - -.. 
toctree:: - :maxdepth: 1 - - /autoapi/pygeoutils/geotools/index - /autoapi/pygeoutils/pygeoutils/index - /autoapi/pygeoutils/smoothing/index - - diff --git a/docs/source/autoapi/pygeoutils/pygeoutils/index.rst b/docs/source/autoapi/pygeoutils/pygeoutils/index.rst deleted file mode 100644 index 5308ff9..0000000 --- a/docs/source/autoapi/pygeoutils/pygeoutils/index.rst +++ /dev/null @@ -1,161 +0,0 @@ -pygeoutils.pygeoutils -===================== - -.. py:module:: pygeoutils.pygeoutils - -.. autoapi-nested-parse:: - - Some utilities for manipulating GeoSpatial data. - - - - - -Module Contents ---------------- - -.. py:function:: arcgis2geojson(arcgis, id_attr = None) - - Convert ESRIGeoJSON format to GeoJSON. - - .. rubric:: Notes - - Based on `arcgis2geojson `__. - - :Parameters: * **arcgis** (:class:`str` or :class:`binary`) -- The ESRIGeoJSON format str (or binary) - * **id_attr** (:class:`str`, *optional*) -- ID of the attribute of interest, defaults to ``None``. - - :returns: :class:`dict` -- A GeoJSON file readable by GeoPandas. - - -.. py:function:: geodf2xarray(geodf, resolution, attr_col = None, fill = 0, projected_crs = 5070) - - Rasterize a ``geopandas.GeoDataFrame`` to ``xarray.DataArray``. - - :Parameters: * **geodf** (:class:`geopandas.GeoDataFrame` or :class:`geopandas.GeoSeries`) -- GeoDataFrame or GeoSeries to rasterize. - * **resolution** (:class:`float`) -- Target resolution of the output raster in the ``projected_crs`` unit. Since - the default ``projected_crs`` is ``EPSG:5070``, the default unit for the - resolution is meters. - * **attr_col** (:class:`str`, *optional*) -- Column name of the attribute to use as variable., defaults to ``None``, - i.e., the variable will be a boolean mask where 1 indicates the presence of - a geometry. Also, note that the attribute must be numeric and have one of the - following ``numpy`` types: ``int16``, ``int32``, ``uint8``, ``uint16``, - ``uint32``, ``float32``, and ``float64``. 
- * **fill** (:class:`int` or :class:`float`, *optional*) -- Value to use for filling the missing values (mask) of the output raster, - defaults to ``0``. - * **projected_crs** (:class:`int`, :class:`str`, or :class:`pyproj.CRS`, *optional*) -- A projected CRS to use for the output raster, defaults to ``EPSG:5070``. - - :returns: :class:`xarray.Dataset` -- The xarray Dataset with a single variable. - - -.. py:function:: gtiff2vrt(tiff_files, vrt_path, relative = False) - - Create a VRT file from a list of (Geo)Tiff files. - - .. note:: - - This function requires ``gdal`` to be installed. - - :Parameters: * **tiff_files** (:class:`list`) -- List of paths to the GeoTiff files. - * **vrt_path** (:class:`str` or :class:`Path`) -- Path to the output VRT file. - * **relative** (:class:`bool`, *optional*) -- If True, use paths relative to the VRT file (default is False). - - -.. py:function:: gtiff2xarray(r_dict, geometry = None, geo_crs = None, ds_dims = None, driver = None, all_touched = False, nodata = None, drop = True) - - Convert (Geo)Tiff byte responses to ``xarray.Dataset``. - - :Parameters: * **r_dict** (:class:`dict`) -- Dictionary of (Geo)Tiff byte responses where keys are some names - that are used for naming each responses, and values are bytes. - * **geometry** (:class:`Polygon`, :class:`MultiPolygon`, or :class:`tuple`, *optional*) -- The geometry to mask the data that should be in the same CRS - as the ``r_dict``. Defaults to ``None``. - * **geo_crs** (:class:`int`, :class:`str`, or :class:`pyproj.CRS`, *optional*) -- The spatial reference of the input geometry, defaults to ``None``. - This argument should be given when ``geometry`` is given. - * **ds_dims** (:class:`tuple` of :class:`str`, *optional*) -- The names of the vertical and horizontal dimensions (in that order) - of the target dataset, default to None. If None, dimension names are - determined from a list of common names. 
- * **driver** (:class:`str`, *optional*) -- A GDAL driver for reading the content, defaults to automatic - detection. A list of the drivers can be found - `here `__. - * **all_touched** (:class:`bool`, *optional*) -- Include a pixel in the mask if it touches any of the shapes. - If False (default), include a pixel only if its center is within one - of the shapes, or if it is selected by Bresenham's line algorithm. - * **nodata** (:class:`float` or :class:`int`, *optional*) -- The nodata value of the raster, defaults to ``None``, i.e., it is - determined from the raster. - * **drop** (:class:`bool`, *optional*) -- If True, drop the data outside of the extent of the mask geometries. - Otherwise, it will return the same raster with the data masked. - Default is True. - - :returns: :class:`xarray.Dataset` or :class:`xarray.DataAraay` -- Requested dataset or dataarray. - - -.. py:function:: json2geodf(content, in_crs = 4326, crs = 4326) - - Create GeoDataFrame from (Geo)JSON. - - :Parameters: * **content** (:class:`dict` or :class:`list` of :class:`dict`) -- A (Geo)JSON dictionary or a list of them. - * **in_crs** (:class:`int`, :class:`str`, or :class:`pyproj.CRS`, *optional*) -- CRS of the content, defaults to ``epsg:4326``. If the content has no CRS, - it will be set to this CRS, otherwise, ``in_crs`` will be ignored. - * **crs** (:class:`int`, :class:`str`, or :class:`pyproj.CRS`, *optional*) -- The target CRS of the output GeoDataFrame, defaults to ``epsg:4326``. - - :returns: :class:`geopandas.GeoDataFrame` -- Generated geo-data frame from a GeoJSON - - -.. py:function:: sample_window(dataset, xy, window = 5, indexes = None, masked = False, resampling = 1) - - Interpolate pixel values at given coordinates by interpolation. - - .. note:: - - This function is adapted from - the ``rasterio.sample.sample_gen`` function of - `RasterIO `__. - - :Parameters: * **dataset** (:class:`rasterio.DatasetReader`) -- Opened in ``"r"`` mode. 
- * **xy** (:term:`iterable`) -- Pairs of x, y coordinates in the dataset's reference system. - * **window** (:class:`int`, *optional*) -- Size of the window to read around each point. Must be odd. - Default is 5. - * **indexes** (:class:`int` or :class:`list` of :class:`int`, *optional*) -- Indexes of dataset bands to sample, defaults to all bands. - * **masked** (:class:`bool`, *optional*) -- Whether to mask samples that fall outside the extent of the dataset. - Default is ``False``. - * **resampling** (:class:`int`, *optional*) -- Resampling method to use. See rasterio.enums.Resampling for options. - Default is 1, i.e., ``Resampling.bilinear``. - - :Yields: :class:`numpy.array` -- An array of length equal to the number of specified indexes - containing the interpolated values for the bands corresponding to those indexes. - - -.. py:function:: xarray2geodf(da, dtype, mask_da = None, connectivity = 8) - - Vectorize a ``xarray.DataArray`` to a ``geopandas.GeoDataFrame``. - - :Parameters: * **da** (:class:`xarray.DataArray`) -- The dataarray to vectorize. - * **dtype** (:class:`type`) -- The data type of the dataarray. Valid types are ``int16``, ``int32``, - ``uint8``, ``uint16``, and ``float32``. - * **mask_da** (:class:`xarray.DataArray`, *optional*) -- The dataarray to use as a mask, defaults to ``None``. - * **connectivity** (:class:`int`, *optional*) -- Use 4 or 8 pixel connectivity for grouping pixels into features, - defaults to 8. - - :returns: :class:`geopandas.GeoDataFrame` -- The vectorized dataarray. - - -.. py:function:: xarray_geomask(ds, geometry, crs, all_touched = False, drop = True, from_disk = False) - - Mask a ``xarray.Dataset`` based on a geometry. 
- - :Parameters: * **ds** (:class:`xarray.Dataset` or :class:`xarray.DataArray`) -- The dataset(array) to be masked - * **geometry** (:class:`Polygon`, :class:`MultiPolygon`, or :class:`tuple` of :class:`length 4`) -- The geometry to mask the data - * **crs** (:class:`int`, :class:`str`, or :class:`pyproj.CRS`) -- The spatial reference of the input geometry - * **all_touched** (:class:`bool`, *optional*) -- Include a pixel in the mask if it touches any of the shapes. - If False (default), include a pixel only if its center is within one - of the shapes, or if it is selected by Bresenham's line algorithm. - * **drop** (:class:`bool`, *optional*) -- If True, drop the data outside of the extent of the mask geometries. - Otherwise, it will return the same raster with the data masked. - Default is True. - * **from_disk** (:class:`bool`, *optional*) -- If True, it will clip from disk using rasterio.mask.mask if possible. - This is beneficial when the size of the data is larger than memory. - Default is False. - - :returns: :class:`xarray.Dataset` or :class:`xarray.DataArray` -- The input dataset with a mask applied (np.nan) - - diff --git a/docs/source/autoapi/pygeoutils/smoothing/index.rst b/docs/source/autoapi/pygeoutils/smoothing/index.rst deleted file mode 100644 index ff1e8d9..0000000 --- a/docs/source/autoapi/pygeoutils/smoothing/index.rst +++ /dev/null @@ -1,248 +0,0 @@ -pygeoutils.smoothing -==================== - -.. py:module:: pygeoutils.smoothing - -.. autoapi-nested-parse:: - - Some utilities for manipulating GeoSpatial data. - - - - - - - -Module Contents ---------------- - -.. py:class:: GeoSpline(points, n_pts, degree = 3, smoothing = None) - - Create a parametric spline from a GeoDataFrame of points. - - :Parameters: * **points** (:class:`geopandas.GeoDataFrame` or :class:`geopandas.GeoSeries` or :term:`array-like ` of :class:`shapely.Point`) -- Input points as a ``GeoDataFrame``, ``GeoSeries``, or array-like of - ``shapely.Point``. 
The results will be more accurate if the CRS is projected. - * **npts_sp** (:class:`int`) -- Number of points in the output spline curve. - * **degree** (:class:`int`, *optional*) -- Degree of the smoothing spline. Must be - 1 <= ``degree`` <= 5. Default to 3 which is a cubic spline. - * **smoothing** (:class:`float` or :obj:`None`, *optional*) -- Smoothing factor is used for determining the number of knots. - This arg controls the tradeoff between closeness and smoothness of fit. - Larger ``smoothing`` means more smoothing while smaller values of - ``smoothing`` indicates less smoothing. If None (default), smoothing - is done with all points. - - .. rubric:: Examples - - >>> import geopandas as gpd - >>> xl, yl = zip( - ... *[ - ... (-97.06138, 32.837), - ... (-97.06133, 32.836), - ... (-97.06124, 32.834), - ... (-97.06127, 32.832), - ... ] - ... ) - >>> pts = gpd.GeoSeries(gpd.points_from_xy(xl, yl, crs=4326)) - >>> sp = GeoSpline(pts.to_crs(3857), 5).spline - >>> pts_sp = gpd.GeoSeries(gpd.points_from_xy(sp.x, sp.y, crs=3857)) - >>> pts_sp = pts_sp.to_crs(4326) - >>> list(zip(pts_sp.x, pts_sp.y)) - [(-97.06138, 32.837), - (-97.06132, 32.83575), - (-97.06126, 32.83450), - (-97.06123, 32.83325), - (-97.06127, 32.83200)] - - - .. py:property:: spline - :type: Spline - - - Get the spline as a ``Spline`` object. - - -.. py:function:: anchored_smoothing(line, npts = None, sigma = None) - - Fit a cubic spline through a line while anchoring the ends. - - :Parameters: * **line** (:class:`shapey.LineString`) -- Line to smooth. - * **npts** (:class:`int`, *optional*) -- Number of points for uniform spacing of the generated spline, defaults - to ``None``, i.e., the number of points along the original line. - * **sigma** (:class:`float`, *optional*) -- Standard deviation for Gaussian kernel used for filtering noise in the line - before fitting the spline. Defaults to ``None``, i.e., no filtering. - - :returns: :class:`numpy.ndarray` -- The fitted cubic spline. - - -.. 
py:function:: line_curvature(line, k = 3, s = None) - - Compute the curvature of a LineString. - - .. rubric:: Notes - - The formula for the curvature of a Spline curve is: - - .. math:: - - \kappa = \frac{\dot{x}\ddot{y} - \ddot{x}\dot{y}}{(\dot{x}^2 + \dot{y}^2)^{3/2}} - - where :math:`\dot{x}` and :math:`\dot{y}` are the first derivatives of the - Spline curve and :math:`\ddot{x}` and :math:`\ddot{y}` are the second - derivatives of the Spline curve. Also, the radius of curvature is: - - .. math:: - - \rho = \frac{1}{|\kappa|} - - :Parameters: * **line** (:class:`shapely.LineString`) -- Line to compute the curvature at. - * **k** (:class:`int`, *optional*) -- Degree of the smoothing spline. Must be - 1 <= ``k`` <= 5. Default to 3 which is a cubic spline. - * **s** (:class:`float` or :obj:`None`, *optional*) -- Smoothing factor is used for determining the number of knots. - This arg controls the tradeoff between closeness and smoothness of fit. - Larger ``s`` means more smoothing while smaller values of ``s`` indicates - less smoothing. If None (default), smoothing is done with all data points. - - :returns: * **phi** (:class:`numpy.ndarray`) -- Angle of the tangent of the Spline curve. - * **curvature** (:class:`numpy.ndarray`) -- Curvature of the Spline curve. - * **radius** (:class:`numpy.ndarray`) -- Radius of curvature of the Spline curve. - - -.. py:function:: make_spline(x, y, n_pts, k = 3, s = None) - - Create a parametric spline from a set of points. - - :Parameters: * **x** (:class:`numpy.ndarray`) -- x-coordinates of the points. - * **y** (:class:`numpy.ndarray`) -- y-coordinates of the points. - * **n_pts** (:class:`int`) -- Number of points in the output spline curve. - * **k** (:class:`int`, *optional*) -- Degree of the smoothing spline. Must be - 1 <= ``k`` <= 5. Default to 3 which is a cubic spline. - * **s** (:class:`float` or :obj:`None`, *optional*) -- Smoothing factor is used for determining the number of knots. 
- This arg controls the tradeoff between closeness and smoothness of fit. - Larger ``s`` means more smoothing while smaller values of ``s`` indicates - less smoothing. If None (default), smoothing is done with all data points. - - :returns: :class:`Spline` -- A Spline object with ``x``, ``y``, ``phi``, ``radius``, ``distance``, - and ``line`` attributes. The ``line`` attribute returns the Spline - as a ``shapely.LineString``. - - -.. py:function:: smooth_linestring(line, smoothing = None, npts = None) - - Smooth a LineString using ``UnivariateSpline`` from ``scipy``. - - :Parameters: * **line** (:class:`shapely.LineString`) -- Centerline to be smoothed. - * **smoothing** (:class:`float` or :obj:`None`, *optional*) -- Smoothing factor is used for determining the number of knots. - This arg controls the tradeoff between closeness and smoothness of fit. - Larger ``smoothing`` means more smoothing while smaller values of - ``smoothing`` indicates less smoothing. If None (default), smoothing - is done with all points. - * **npts** (:class:`int`, *optional*) -- Number of points in the output smoothed line. Defaults to 5 times - the number of points in the input line. - - :returns: :class:`shapely.LineString` -- Smoothed line with uniform spacing. - - .. rubric:: Examples - - >>> import geopandas as gpd - >>> import shapely - >>> line = shapely.LineString( - ... [ - ... (-97.06138, 32.837), - ... (-97.06133, 32.836), - ... (-97.06124, 32.834), - ... (-97.06127, 32.832), - ... ] - ... ) - >>> line_smooth = smooth_linestring(line, 4326, 5) - >>> list(zip(*line_smooth.xy)) - [(-97.06138, 32.837), - (-97.06132, 32.83575), - (-97.06126, 32.83450), - (-97.06123, 32.83325), - (-97.06127, 32.83200)] - - -.. py:function:: smooth_multilinestring(mline, npts_list = None, sigma = None) - - Smooth a MultiLineString using a cubic spline. - - :Parameters: * **mline** (:class:`shapely.MultiLineString`) -- MultiLineString to smooth. 
- * **npts_list** (:class:`list` of :class:`int`, *optional*) -- Number of points for uniform spacing of the generated spline, defaults - to ``None``, i.e., the number of points along each line in the MultiLineString. - * **sigma** (:class:`float`, *optional*) -- Standard deviation for Gaussian kernel used for filtering noise in the line - before fitting the spline. Defaults to ``None``, i.e., no filtering. - - :returns: :class:`shapely.MultiLineString` -- The fitted cubic spline. - - -.. py:function:: spline_curvature(spline_x, spline_y, konts) - - Compute the curvature of a Spline curve. - - .. rubric:: Notes - - The formula for the curvature of a Spline curve is: - - .. math:: - - \kappa = \frac{\dot{x}\ddot{y} - \ddot{x}\dot{y}}{(\dot{x}^2 + \dot{y}^2)^{3/2}} - - where :math:`\dot{x}` and :math:`\dot{y}` are the first derivatives of the - Spline curve and :math:`\ddot{x}` and :math:`\ddot{y}` are the second - derivatives of the Spline curve. Also, the radius of curvature is: - - .. math:: - - \rho = \frac{1}{|\kappa|} - - :Parameters: * **spline_x** (:class:`scipy.interpolate.UnivariateSpline`) -- Spline curve for the x-coordinates of the points. - * **spline_y** (:class:`scipy.interpolate.UnivariateSpline`) -- Spline curve for the y-coordinates of the points. - * **konts** (:class:`numpy.ndarray`) -- Knots along the Spline curve to compute the curvature at. The knots - must be strictly increasing. - - :returns: * **phi** (:class:`numpy.ndarray`) -- Angle of the tangent of the Spline curve. - * **curvature** (:class:`numpy.ndarray`) -- Curvature of the Spline curve. - * **radius** (:class:`numpy.ndarray`) -- Radius of curvature of the Spline curve. - - -.. py:function:: spline_linestring(line, n_pts, degree = 3, smoothing = None) - - Generate a parametric spline from a LineString. - - :Parameters: * **line** (:class:`shapely.LineString`, :class:`shapely.MultiLineString`) -- Line to smooth. 
Note that if ``line`` is ``MultiLineString`` - it will be merged into a single ``LineString``. If the merge - fails, an exception will be raised. - * **n_pts** (:class:`int`) -- Number of points in the output spline curve. - * **degree** (:class:`int`, *optional*) -- Degree of the smoothing spline. Must be - 1 <= ``degree`` <= 5. Default to 3 which is a cubic spline. - * **smoothing** (:class:`float` or :obj:`None`, *optional*) -- Smoothing factor is used for determining the number of knots. - This arg controls the tradeoff between closeness and smoothness of fit. - Larger ``smoothing`` means more smoothing while smaller values of - ``smoothing`` indicates less smoothing. If None (default), smoothing - is done with all points. - - :returns: :class:`Spline` -- A :class:`Spline` object with ``x``, ``y``, ``phi``, ``radius``, - ``distance``, and ``line`` attributes. The ``line`` attribute - returns the Spline as a shapely.LineString. - - .. rubric:: Examples - - >>> import geopandas as gpd - >>> import shapely - >>> line = shapely.LineString( - ... [ - ... (-97.06138, 32.837), - ... (-97.06133, 32.836), - ... (-97.06124, 32.834), - ... (-97.06127, 32.832), - ... ] - ... ) - >>> sp = spline_linestring(line, 4326, 5) - >>> list(zip(*sp.line.xy)) - [(-97.06138, 32.837), - (-97.06132, 32.83575), - (-97.06126, 32.83450), - (-97.06123, 32.83325), - (-97.06127, 32.83200)] - - diff --git a/docs/source/autoapi/pygridmet/core/index.rst b/docs/source/autoapi/pygridmet/core/index.rst deleted file mode 100644 index 00fe77e..0000000 --- a/docs/source/autoapi/pygridmet/core/index.rst +++ /dev/null @@ -1,97 +0,0 @@ -pygridmet.core -============== - -.. py:module:: pygridmet.core - -.. autoapi-nested-parse:: - - Core class for the GridMET functions. - - - - - -Module Contents ---------------- - -.. py:class:: GridMET(dates = 2000, variables = None, snow = False) - - Base class for GridMET requests. 
- - :Parameters: * **dates** (:class:`tuple` or :class:`int` or :class:`list`, *optional*) -- Start and end dates as a tuple, (start, end), or a list of years. - Defaults to ``2000`` so the class can be initialized without any arguments. - * **variables** (:class:`str` or :class:`list` or :class:`tuple`, *optional*) -- List of variables to be downloaded. The acceptable variables are: - ``pr``, ``rmax``, ``rmin``, ``sph``, ``srad``, ``th``, ``tmmn``, ``tmmx``, ``vs``, - ``bi``, ``fm100``, ``fm1000``, ``erc``, ``etr``, ``pet``, and ``vpd``. - Descriptions can be found `here `__. - Defaults to ``None``, i.e., all the variables are downloaded. - * **snow** (:class:`bool`, *optional*) -- Compute snowfall from precipitation and minimum temperature. Defaults to ``False``. - - .. rubric:: References - - .. footbibliography:: - - - .. py:method:: check_dates(dates) - :staticmethod: - - - Check if input dates are in correct format and valid. - - - - .. py:method:: dates_todict(dates) - - Set dates by start and end dates as a tuple, (start, end). - - - - .. py:method:: dates_tolist(dates) - - Correct dates for GridMET accounting for leap years. - - GridMET doesn't account for leap years and removes Dec 31 when - it's leap year. - - :Parameters: **dates** (:class:`tuple`) -- Target start and end dates. - - :returns: :class:`list` -- All the dates in the GridMET database within the provided date range. - - - - .. py:method:: separate_snow(clm, t_rain = T_RAIN, t_snow = T_SNOW) - - Separate snow based on :footcite:t:`Martinez_2010`. - - :Parameters: * **clm** (:class:`pandas.DataFrame` or :class:`xarray.Dataset`) -- Climate data that should include ``pr`` and ``tmmn``. - * **t_rain** (:class:`float`, *optional*) -- Threshold for temperature for considering rain, defaults to 2.5 K. - * **t_snow** (:class:`float`, *optional*) -- Threshold for temperature for considering snow, defaults to 0.6 K. 
- - :returns: :class:`pandas.DataFrame` or :class:`xarray.Dataset` -- Input data with ``snow (mm)`` column if input is a ``pandas.DataFrame``, - or ``snow`` variable if input is an ``xarray.Dataset``. - - .. rubric:: References - - .. footbibliography:: - - - - .. py:method:: years_todict(years) - - Set date by list of year(s). - - - - .. py:method:: years_tolist(years) - - Correct dates for GridMET accounting for leap years. - - GridMET doesn't account for leap years and removes Dec 31 when - it's leap year. - - :Parameters: **years** (:class:`list`) -- A list of target years. - - :returns: :class:`list` -- All the dates in the GridMET database within the provided date range. - - - diff --git a/docs/source/autoapi/pygridmet/index.rst b/docs/source/autoapi/pygridmet/index.rst deleted file mode 100644 index e3d399b..0000000 --- a/docs/source/autoapi/pygridmet/index.rst +++ /dev/null @@ -1,21 +0,0 @@ -pygridmet -========= - -.. py:module:: pygridmet - -.. autoapi-nested-parse:: - - Top-level package for PyGridMET. - - - -Submodules ----------- - -.. toctree:: - :maxdepth: 1 - - /autoapi/pygridmet/core/index - /autoapi/pygridmet/pygridmet/index - - diff --git a/docs/source/autoapi/pygridmet/pygridmet/index.rst b/docs/source/autoapi/pygridmet/pygridmet/index.rst deleted file mode 100644 index 29adb36..0000000 --- a/docs/source/autoapi/pygridmet/pygridmet/index.rst +++ /dev/null @@ -1,109 +0,0 @@ -pygridmet.pygridmet -=================== - -.. py:module:: pygridmet.pygridmet - -.. autoapi-nested-parse:: - - Access the GridMET database for both single single pixel and gridded queries. - - - - - -Module Contents ---------------- - -.. py:function:: get_bycoords(coords, dates, coords_id = None, crs = 4326, variables = None, snow = False, snow_params = None, to_xarray = False) - - Get point-data from the GridMET database at 1-km resolution. 
- - :Parameters: * **coords** (:class:`tuple` or :class:`list` of :class:`tuples`) -- Coordinates of the location(s) of interest as a tuple (x, y) - * **dates** (:class:`tuple` or :class:`list`, *optional*) -- Start and end dates as a tuple (start, end) or a list of years ``[2001, 2010, ...]``. - * **coords_id** (:class:`list` of :class:`int` or :class:`str`, *optional*) -- A list of identifiers for the coordinates. This option only applies when ``to_xarray`` - is set to ``True``. If not provided, the coordinates will be enumerated. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The CRS of the input coordinates, defaults to ``EPSG:4326``. - * **variables** (:class:`str` or :class:`list`) -- List of variables to be downloaded. The acceptable variables are: - ``pr``, ``rmax``, ``rmin``, ``sph``, ``srad``, ``th``, ``tmmn``, ``tmmx``, ``vs``, - ``bi``, ``fm100``, ``fm1000``, ``erc``, ``etr``, ``pet``, and ``vpd``. - Descriptions can be found `here `__. - Defaults to ``None``, i.e., all the variables are downloaded. - * **snow** (:class:`bool`, *optional*) -- Compute snowfall from precipitation and minimum temperature. Defaults to ``False``. - * **snow_params** (:class:`dict`, *optional*) -- Model-specific parameters as a dictionary that is passed to the snowfall function. - These parameters are only used if ``snow`` is ``True``. Two parameters are required: - ``t_rain`` (deg C) which is the threshold for temperature for considering rain and - ``t_snow`` (deg C) which is the threshold for temperature for considering snow. - The default values are ``{'t_rain': 2.5, 't_snow': 0.6}`` that are adopted from - https://doi.org/10.5194/gmd-11-1077-2018. - * **to_xarray** (:class:`bool`, *optional*) -- Return the data as an ``xarray.Dataset``. Defaults to ``False``. - - :returns: :class:`pandas.DataFrame` or :class:`xarray.Dataset` -- Daily climate data for a single or list of locations. - - .. 
rubric:: Examples - - >>> import pygridmet as gridmet - >>> coords = (-1431147.7928, 318483.4618) - >>> dates = ("2000-01-01", "2000-01-31") - >>> clm = gridmet.get_bycoords( - ... coords, - ... dates, - ... crs=3542, - ... ) - >>> clm["pr (mm)"].mean() - 9.677 - - -.. py:function:: get_bygeom(geometry, dates, crs = 4326, variables = None, snow = False, snow_params = None) - - Get gridded data from the GridMET database at 1-km resolution. - - :Parameters: * **geometry** (:class:`Polygon` or :class:`tuple`) -- The geometry of the region of interest. It can be a shapely Polygon or a tuple - of length 4 representing the bounding box (minx, miny, maxx, maxy). - * **dates** (:class:`tuple` or :class:`list`, *optional*) -- Start and end dates as a tuple (start, end) or a list of years [2001, 2010, ...]. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The CRS of the input geometry, defaults to epsg:4326. - * **variables** (:class:`str` or :class:`list`) -- List of variables to be downloaded. The acceptable variables are: - ``pr``, ``rmax``, ``rmin``, ``sph``, ``srad``, ``th``, ``tmmn``, ``tmmx``, ``vs``, - ``bi``, ``fm100``, ``fm1000``, ``erc``, ``etr``, ``pet``, and ``vpd``. - Descriptions can be found `here `__. - Defaults to ``None``, i.e., all the variables are downloaded. - * **snow** (:class:`bool`, *optional*) -- Compute snowfall from precipitation and minimum temperature. Defaults to ``False``. - * **snow_params** (:class:`dict`, *optional*) -- Model-specific parameters as a dictionary that is passed to the snowfall function. - These parameters are only used if ``snow`` is ``True``. Two parameters are required: - ``t_rain`` (deg C) which is the threshold for temperature for considering rain and - ``t_snow`` (deg C) which is the threshold for temperature for considering snow. - The default values are ``{'t_rain': 2.5, 't_snow': 0.6}`` that are adopted from - https://doi.org/10.5194/gmd-11-1077-2018. 
- - :returns: :class:`xarray.Dataset` -- Daily climate data within the target geometry. - - .. rubric:: Examples - - >>> from shapely import Polygon - >>> import pygridmet as gridmet - >>> geometry = Polygon( - ... [[-69.77, 45.07], [-69.31, 45.07], [-69.31, 45.45], [-69.77, 45.45], [-69.77, 45.07]] - ... ) - >>> clm = gridmet.get_bygeom(geometry, 2010, variables="tmmn") - >>> clm["tmmn"].mean().item() - 274.167 - - -.. py:function:: get_conus(years, variables = None, save_dir = 'clm_gridmet') - - Get the entire CONUS data for the specified years and variables. - - :Parameters: * **years** (:class:`int` or :class:`list`) -- The year(s) of interest. - * **variables** (:class:`str` or :class:`list`, *optional*) -- The variable(s) of interest, defaults to ``None`` which downloads - all the variables. - * **save_dir** (:class:`str` or :class:`Path`, *optional*) -- The directory to store the downloaded data, defaults to ``./clm_gridmet``. - The files are stored in the NetCDF format and the file names are based - on the variable names and the years, e.g., ``tmmn_2010.nc``. - - :returns: :class:`list` -- A list of the downloaded files. - - .. rubric:: Examples - - >>> import pygridmet as gridmet - >>> filenames = gridmet.get_conus(2010, "tmmn") - - diff --git a/docs/source/autoapi/pynhd/core/index.rst b/docs/source/autoapi/pynhd/core/index.rst deleted file mode 100644 index 0b8e459..0000000 --- a/docs/source/autoapi/pynhd/core/index.rst +++ /dev/null @@ -1,267 +0,0 @@ -pynhd.core -========== - -.. py:module:: pynhd.core - -.. autoapi-nested-parse:: - - Base classes for PyNHD functions. - - - - - -Module Contents ---------------- - -.. py:class:: AGRBase(base_url, layer = None, outfields = '*', crs = 4326, outformat = 'json') - - Base class for getting geospatial data from a ArcGISRESTful service. - - :Parameters: * **base_url** (:class:`str`, *optional*) -- The ArcGIS RESTful service url. 
The URL must either include a layer number - after the last ``/`` in the url or the target layer must be passed as an argument. - * **layer** (:class:`str`, *optional*) -- A valid service layer. To see a list of available layers instantiate the class - without passing any argument. - * **outfields** (:class:`str` or :class:`list`, *optional*) -- Target field name(s), default to "*" i.e., all the fields. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- Target spatial reference, default to ``EPSG:4326`` - * **outformat** (:class:`str`, *optional*) -- One of the output formats offered by the selected layer. If not correct - a list of available formats is shown, defaults to ``json``. - - - .. py:method:: bygeom(geom, geo_crs = 4326, sql_clause = '', distance = None, return_m = False, return_geom = True) - - Get feature within a geometry that can be combined with a SQL where clause. - - :Parameters: * **geom** (:class:`Polygon` or :class:`tuple`) -- A geometry (Polygon) or bounding box (tuple of length 4). - * **geo_crs** (:class:`str`) -- The spatial reference of the input geometry. - * **sql_clause** (:class:`str`, *optional*) -- A valid SQL 92 WHERE clause, defaults to an empty string. - * **distance** (:class:`int`, *optional*) -- The buffer distance for the input geometries in meters, default to None. - * **return_m** (:class:`bool`, *optional*) -- Whether to activate the Return M (measure) in the request, defaults to False. - * **return_geom** (:class:`bool`, *optional*) -- Whether to return the geometry of the feature, defaults to ``True``. - - :returns: :class:`geopandas.GeoDataFrame` -- The requested features as a GeoDataFrame. - - - - .. py:method:: byids(field, fids, return_m = False, return_geom = True) - - Get features based on a list of field IDs. - - :Parameters: * **field** (:class:`str`) -- Name of the target field that IDs belong to. - * **fids** (:class:`str` or :class:`list`) -- A list of target field ID(s). 
- * **return_m** (:class:`bool`) -- Whether to activate the Return M (measure) in the request, defaults to False. - * **return_geom** (:class:`bool`, *optional*) -- Whether to return the geometry of the feature, defaults to ``True``. - - :returns: :class:`geopandas.GeoDataFrame` -- The requested features as a GeoDataFrame. - - - - .. py:method:: bysql(sql_clause, return_m = False, return_geom = True) - - Get feature IDs using a valid SQL 92 WHERE clause. - - .. rubric:: Notes - - Not all web services support this type of query. For more details look - `here `__ - - :Parameters: * **sql_clause** (:class:`str`) -- A valid SQL 92 WHERE clause. - * **return_m** (:class:`bool`) -- Whether to activate the measure in the request, defaults to False. - * **return_geom** (:class:`bool`, *optional*) -- Whether to return the geometry of the feature, defaults to ``True``. - - :returns: :class:`geopandas.GeoDataFrame` -- The requested features as a GeoDataFrame. - - - - .. py:method:: get_validlayers(url) - :staticmethod: - - - Get a list of valid layers. - - :Parameters: **url** (:class:`str`) -- The URL of the ArcGIS REST service. - - :returns: :class:`dict` -- A dictionary of valid layers. - - - - .. py:property:: service_info - :type: ServiceInfo - - - Get the service information. - - -.. py:class:: GeoConnex(item = None, dev = False, max_nfeatures = 10000) - - Access to the GeoConnex API. - - .. rubric:: Notes - - The ``geometry`` field of the query can be a Polygon, MultiPolygon, - or tuple/list of length 4 (bbox) in ``EPSG:4326`` CRS. They should - be within the extent of the GeoConnex endpoint. - - :Parameters: * **The item (service endpoint) to query. 
Valid endpoints are** -- - - - ``hu02`` for Two-digit Hydrologic Regions - - ``hu04`` for Four-digit Hydrologic Subregion - - ``hu06`` for Six-digit Hydrologic Basins - - ``hu08`` for Eight-digit Hydrologic Subbasins - - ``hu10`` for Ten-digit Watersheds - - ``nat_aq`` for National Aquifers of the United States from - USGS National Water Information System National Aquifer code list. - - ``principal_aq`` for Principal Aquifers of the United States from - 2003 USGS data release - - ``sec_hydrg_reg`` for Secondary Hydrogeologic Regions of the - Conterminous United States from 2018 USGS data release - - ``gages`` for US Reference Stream Gauge Monitoring Locations - - ``mainstems`` for US Reference Mainstem Rivers - - ``states`` for U.S. States - - ``counties`` for U.S. Counties - - ``aiannh`` for Native American Lands - - ``cbsa`` for U.S. Metropolitan and Micropolitan Statistical Areas - - ``ua10`` for Urbanized Areas and Urban Clusters (2010 Census) - - ``places`` for U.S. legally incororated and Census designated places - - ``pws`` for U.S. Public Water Systems - - ``dams`` for US Reference Dams - * **dev** (:class:`bool`, *optional*) -- Whether to use the development endpoint, defaults to ``False``. - * **max_nfeatures** (:class:`int`, *optional*) -- The maximum number of features to request from the service, - defaults to 10000. - - - .. py:method:: bybox(bbox, skip_geometry = False) - - Query the GeoConnex endpoint by bounding box. - - :Parameters: * **bbox** (:class:`tuple`) -- A bounding box in the form of ``(xmin, ymin, xmax, ymax)``, - in ``EPSG:4326`` CRS, i.e., decimal degrees. - * **skip_geometry** (:class:`bool`, *optional*) -- If ``True``, no geometry will not be returned, by default ``False``. - - :returns: :class:`geopandas.GeoDataFrame` -- The query result as a ``geopandas.GeoDataFrame``. - - - - .. 
py:method:: bycql(cql_dict: dict[str, Any], skip_geometry: Literal[False] = False) -> geopandas.GeoDataFrame - bycql(cql_dict: dict[str, Any], skip_geometry: Literal[True]) -> pandas.DataFrame - - Query the GeoConnex endpoint. - - .. rubric:: Notes - - GeoConnex only supports Basinc CQL2 queries. For more information - and examples visit this - `link `__. - Use this for non-spatial queries, since there's a dedicated method - for spatial queries, :meth:`.bygeometry`. - - :Parameters: * **cql_dict** (:class:`dict`) -- A valid CQL dictionary (non-spatial queries). - * **skip_geometry** (:class:`bool`, *optional*) -- If ``True``, no geometry will not be returned, by default ``False``. - - :returns: :class:`geopandas.GeoDataFrame` -- The query result as a ``geopandas.GeoDataFrame``. - - - - .. py:method:: byfilter(filter_str, skip_geometry = False) - - Query the GeoConnex endpoint. - - .. rubric:: Notes - - GeoConnex only supports simple CQL queries. For more information - and examples visit https://portal.ogc.org/files/96288 - - :Parameters: * **filter_str** (:class:`dict`) -- A valid filter string. The filter string shouldn't be long - since a GET request is used. - * **skip_geometry** (:class:`bool`, *optional*) -- If ``True``, no geometry will not be returned, by default ``False``. - - :returns: :class:`geopandas.GeoDataFrame` -- The query result as a ``geopandas.GeoDataFrame``. - - - - .. py:method:: bygeometry(geometry1: GeoType, geometry2: GeoType | None = ..., predicate: str = ..., crs: CRSType = ..., skip_geometry: Literal[False] = False) -> geopandas.GeoDataFrame - bygeometry(geometry1: GeoType, geometry2: GeoType | None = ..., predicate: str = ..., crs: CRSType = ..., skip_geometry: Literal[True] = True) -> pandas.DataFrame - - Query the GeoConnex endpoint by geometry. - - :Parameters: * **geometry1** (:class:`Polygon` or :class:`tuple` of :class:`float`) -- The first geometry or bounding boxes to query. 
A bounding box is - a tuple of length 4 in the form of ``(xmin, ymin, xmax, ymax)``. - For example, a spatial query for a single geometry would be - ``INTERSECTS(geom, geometry1)``. - * **geometry2** (:class:`Polygon` or :class:`tuple` of :class:`float`, *optional*) -- The second geometry or bounding boxes to query. A bounding box is - a tuple of length 4 in the form of ``(xmin, ymin, xmax, ymax)``. - Default is ``None``. For example, a spatial query for two - geometries would be ``CROSSES(geometry1, geometry2)``. - * **predicate** (:class:`str`, *optional*) -- The predicate to use, by default ``intersects``. Supported - predicates are ``intersects``, ``equals``, ``disjoint``, ``touches``, - ``within``, ``overlaps``, ``crosses`` and ``contains``. - * **crs** (:class:`int` or :class:`str` or :class:`pyproj.CRS`, *optional*) -- The CRS of the polygon, by default ``EPSG:4326``. If the input - is a ``geopandas.GeoDataFrame`` or ``geopandas.GeoSeries``, - this argument will be ignored. - * **skip_geometry** (:class:`bool`, *optional*) -- If ``True``, no geometry will be returned. - - :returns: :class:`geopandas.GeoDataFrame` -- The query result as a ``geopandas.GeoDataFrame``. - - - - .. py:method:: byid(feature_name: str, feature_ids: list[str] | str, skip_geometry: Literal[False] = False) -> geopandas.GeoDataFrame - byid(feature_name: str, feature_ids: list[str] | str, skip_geometry: Literal[True]) -> pandas.DataFrame - - Query the GeoConnex endpoint. - - :Parameters: * **feature_name** (:class:`str`) -- The name of the feature to query. - * **feature_ids** (:class:`list` or :class:`str`) -- The IDs of the feature to query. - * **skip_geometry** (:class:`bool`, *optional*) -- If ``True``, no geometry will be returned, by default ``False``. - - :returns: :class:`geopandas.GeoDataFrame` -- The query result as a ``geopandas.GeoDataFrame`` or a ``pandas.DataFrame``. - - - - .. py:method:: byitem(item_id) - - Query the GeoConnex endpoint by an item ID.
- - :Parameters: **item_id** (:class:`str`) -- The ID of the item to query. Note that this GeoConnex's item ID which - is not necessarily the same as the provider's item ID. For example, - for querying gages, the item ID is not the same as the USGS gage ID - but for querying HUC02, the item ID is the same as the HUC02 ID. - - :returns: :class:`geopandas.GeoDataFrame` -- The query result as a ``geopandas.GeoDataFrame``. - - - - .. py:property:: dev - :type: bool - - - Return the name of the endpoint. - - - .. py:property:: item - :type: str | None - - - Return the name of the endpoint. - - -.. py:class:: ScienceBase - - Access and explore items on USGS's ScienceBase. - - - .. py:method:: get_children(item) - :staticmethod: - - - Get children items of an item. - - - - .. py:method:: get_file_urls(item) - :staticmethod: - - - Get download and meta URLs of all the available files for an item. - - - diff --git a/docs/source/autoapi/pynhd/index.rst b/docs/source/autoapi/pynhd/index.rst deleted file mode 100644 index 9630b85..0000000 --- a/docs/source/autoapi/pynhd/index.rst +++ /dev/null @@ -1,23 +0,0 @@ -pynhd -===== - -.. py:module:: pynhd - -.. autoapi-nested-parse:: - - Top-level package for PyNHD. - - - -Submodules ----------- - -.. toctree:: - :maxdepth: 1 - - /autoapi/pynhd/core/index - /autoapi/pynhd/network_tools/index - /autoapi/pynhd/nhdplus_derived/index - /autoapi/pynhd/pynhd/index - - diff --git a/docs/source/autoapi/pynhd/network_tools/index.rst b/docs/source/autoapi/pynhd/network_tools/index.rst deleted file mode 100644 index 539b18c..0000000 --- a/docs/source/autoapi/pynhd/network_tools/index.rst +++ /dev/null @@ -1,367 +0,0 @@ -pynhd.network_tools -=================== - -.. py:module:: pynhd.network_tools - -.. autoapi-nested-parse:: - - Access NLDI and WaterData databases. - - - - - - - -Module Contents ---------------- - -.. py:class:: NHDTools(flowlines) - - Prepare NHDPlus data for downstream analysis. - - .. 
rubric:: Notes - - Some of these tools are ported from - `nhdplusTools `__. - - :Parameters: **flowlines** (:class:`geopandas.GeoDataFrame`) -- NHDPlus flowlines with at least the following columns: - ``comid``, ``lengthkm``, ``ftype``, ``terminalfl``, ``fromnode``, ``tonode``, - ``totdasqkm``, ``startflag``, ``streamorde``, ``streamcalc``, ``terminalpa``, - ``pathlength``, ``divergence``, ``hydroseq``, and ``levelpathi``. - - - .. py:method:: add_tocomid() - - Find the downstream comid(s) of each comid in NHDPlus flowline database. - - .. rubric:: Notes - - This function requires the following columns: - ``comid``, ``terminalpa``, ``fromnode``, ``tonode`` - - - - .. py:method:: check_requirements(reqs, cols) - :staticmethod: - - - Check for all the required data. - - :Parameters: * **reqs** (:term:`iterable`) -- A list of required data names (str) - * **cols** (:class:`list`) -- A list of variable names (str) - - - - .. py:method:: clean_flowlines(use_enhd_attrs, terminal2nan) - - Clean up flowlines. - - :Parameters: * **use_enhd_attrs** (:class:`bool`) -- Use attributes from the ENHD database. - * **terminal2nan** (:class:`bool`) -- Convert terminal flowlines to ``NaN``. - - - - .. py:method:: remove_isolated() - - Remove isolated flowlines. - - - - .. py:method:: remove_tinynetworks(min_path_size, min_path_length, min_network_size) - - Remove small paths in NHDPlus flowline database. - - .. rubric:: Notes - - This function requires the following columns: - ``levelpathi``, ``hydroseq``, ``totdasqkm``, ``terminalfl``, ``startflag``, - ``pathlength``, and ``terminalpa``. - - :Parameters: * **min_network_size** (:class:`float`) -- Minimum size of drainage network in sqkm. - * **min_path_length** (:class:`float`) -- Minimum length of terminal level path of a network in km. - * **min_path_size** (:class:`float`) -- Minimum size of outlet level path of a drainage basin in km. - Drainage basins with an outlet drainage area smaller than - this value will be removed.
- - - - .. py:method:: to_linestring() - - Convert flowlines to shapely LineString objects. - - -.. py:function:: enhd_flowlines_nx() - - Get a ``networkx.DiGraph`` of the entire NHD flowlines. - - .. versionchanged:: 0.16.2 - - The function now replaces all 0 values in the ``tocomid`` column of ENHD - with the negative of their corresponding ``comid`` values. This ensures - all sinks are unique and treated accordingly for topological sorting - and other network analysis. The differences are in the returned - ``label2comid`` dictionary and ``onnetwork_sorted`` which will contain - the negative values for the sinks. - - .. rubric:: Notes - - The graph is directed and has all the attributes of the flowlines - in `ENHD `__. - Note that COMIDs are based on the 2020 snapshot of the NHDPlusV2.1. - - :returns: * **graph** (:class:`networkx.DiGraph`) -- The generated directed graph - * **label2comid** (:class:`dict`) -- A mapping of COMIDs to the node IDs in the graph - * **onnetwork_sorted** (:class:`list`) -- A topologically sorted list of the COMIDs. - - -.. py:function:: flowline_resample(flw, spacing, id_col = 'comid', smoothing = None) - - Resample a flowline based on a given spacing. - - :Parameters: * **flw** (:class:`geopandas.GeoDataFrame`) -- A dataframe with ``geometry`` and ``id_col`` columns and CRS attribute. - The flowlines should be able to be merged to a single ``LineString``. - Otherwise, you should use the :func:`network_resample` function. - * **spacing** (:class:`float`) -- Spacing between the sample points in meters. - * **id_col** (:class:`str`, *optional*) -- Name of the flowlines column containing IDs, defaults to ``comid``. - * **smoothing** (:class:`float` or :obj:`None`, *optional*) -- Smoothing factor is used for determining the number of knots. - This arg controls the tradeoff between closeness and smoothness of fit. - Larger ``smoothing`` means more smoothing while smaller values of - ``smoothing`` indicates less smoothing.
If None (default), smoothing - is done with all points. - - :returns: :class:`geopandas.GeoDataFrame` -- Resampled flowline. - - -.. py:function:: flowline_xsection(flw, distance, width, id_col = 'comid', smoothing = None) - - Get cross-section of a river network at a given spacing. - - :Parameters: * **flw** (:class:`geopandas.GeoDataFrame`) -- A dataframe with ``geometry`` and, ``id_col``, and ``levelpathi`` - columns and a projected CRS attribute. - * **distance** (:class:`float`) -- The distance between two consecutive cross-sections. - * **width** (:class:`float`) -- The width of the cross-section. - * **id_col** (:class:`str`, *optional*) -- Name of the flowlines column containing IDs, defaults to ``comid``. - * **smoothing** (:class:`float` or :obj:`None`, *optional*) -- Smoothing factor is used for determining the number of knots. - This arg controls the tradeoff between closeness and smoothness of fit. - Larger ``smoothing`` means more smoothing while smaller values of - ``smoothing`` indicates less smoothing. If None (default), smoothing - is done with all points. - - :returns: :class:`geopandas.GeoDataFrame` -- A dataframe with two columns: ``geometry`` and ``comid``. The ``geometry`` - column contains the cross-section of the river network and the ``comid`` - column contains the corresponding ``comid`` from the input dataframe. - Note that each ``comid`` can have multiple cross-sections depending on - the given spacing distance. - - -.. py:function:: mainstem_huc12_nx() - - Get a ``networkx.DiGraph`` of the entire mainstem HUC12s. - - .. rubric:: Notes - - The directed graph is generated from the ``nhdplusv2wbd.csv`` file with all - attributes that can be found in - `Mainstem `__. - Note that HUC12s are based on the 2020 snapshot of the NHDPlusV2.1. - - :returns: * :class:`networkx.DiGraph` -- The mainstem as a ``networkx.DiGraph`` with all the attributes of the - mainstems. - * :class:`dict` -- A mapping of the HUC12s to the node IDs in the graph. 
- * :class:`list` -- A topologically sorted list of the HUC12s which strings of length 12. - - -.. py:function:: network_resample(flw, spacing, id_col = 'comid', smoothing = None) - - Resample a network flowline based on a given spacing. - - :Parameters: * **flw** (:class:`geopandas.GeoDataFrame`) -- A dataframe with ``geometry`` and, ``id_col``, and ``levelpathi`` - columns and a projected CRS attribute. - * **spacing** (:class:`float`) -- Target spacing between the sample points in the length unit of the ``flw``'s CRS. - * **id_col** (:class:`str`, *optional*) -- Name of the flowlines column containing IDs, defaults to ``comid``. - * **smoothing** (:class:`float` or :obj:`None`, *optional*) -- Smoothing factor is used for determining the number of knots. - This arg controls the tradeoff between closeness and smoothness of fit. - Larger ``smoothing`` means more smoothing while smaller values of - ``smoothing`` indicates less smoothing. If None (default), smoothing - is done with all points. - - :returns: :class:`geopandas.GeoDataFrame` -- Resampled flowlines. - - -.. py:function:: network_xsection(flw, distance, width, id_col = 'comid', smoothing = None) - - Get cross-section of a river network at a given spacing. - - :Parameters: * **flw** (:class:`geopandas.GeoDataFrame`) -- A dataframe with ``geometry`` and, ``id_col``, and ``levelpathi`` - columns and a projected CRS attribute. - * **distance** (:class:`float`) -- The distance between two consecutive cross-sections. - * **width** (:class:`float`) -- The width of the cross-section. - * **id_col** (:class:`str`, *optional*) -- Name of the flowlines column containing IDs, defaults to ``comid``. - * **smoothing** (:class:`float` or :obj:`None`, *optional*) -- Smoothing factor is used for determining the number of knots. - This arg controls the tradeoff between closeness and smoothness of fit. - Larger ``smoothing`` means more smoothing while smaller values of - ``smoothing`` indicates less smoothing. 
If None (default), smoothing - is done with all points. - - :returns: :class:`geopandas.GeoDataFrame` -- A dataframe with two columns: ``geometry`` and ``comid``. The ``geometry`` - column contains the cross-section of the river network and the ``comid`` - column contains the corresponding ``comid`` from the input dataframe. - Note that each ``comid`` can have multiple cross-sections depending on - the given spacing distance. - - -.. py:function:: nhdflw2nx(flowlines, id_col = 'comid', toid_col = 'tocomid', edge_attr = None) - - Convert NHDPlus flowline database to networkx graph. - - :Parameters: * **flowlines** (:class:`geopandas.GeoDataFrame`) -- NHDPlus flowlines. - * **id_col** (:class:`str`, *optional*) -- Name of the column containing the node ID, defaults to "comid". - * **toid_col** (:class:`str`, *optional*) -- Name of the column containing the downstream node ID, defaults to "tocomid". - * **edge_attr** (:class:`str`, *optional*) -- Name of the column containing the edge attributes, defaults to ``None``. - If ``True``, all remaining columns will be used as edge attributes. - - :returns: :class:`nx.DiGraph` -- Networkx directed graph of the NHDPlus flowlines. Note that all elements of - the ``toid_col`` are replaced with negative values of their corresponding - ``id_col`` values if they are ``NaN`` or 0. This is to ensure that the generated - nodes in the graph are unique. - - -.. py:function:: nhdplus_l48(layer = None, data_dir = 'cache', **kwargs) - - Get the entire NHDPlus dataset. - - .. rubric:: Notes - - The entire NHDPlus dataset for CONUS (Lower 48) is downloaded from - `here `__. - This 7.3 GB file will take a while to download, depending on your internet - connection. The first time you run this function, the file will be downloaded - and stored in the ``./cache`` directory. Subsequent calls will use the cached - file. Moreover, there are two additional dependencies required to read the - file: ``pyogrio`` and ``py7zr``.
These dependencies can be installed using - ``pip install pyogrio py7zr`` or ``conda install -c conda-forge pyogrio py7zr``. - - :Parameters: * **layer** (:class:`str`, *optional*) -- The layer name to be returned. Either ``layer`` should be provided or - ``sql``. Defaults to ``None``. - The available layers are: - - - ``Gage`` - - ``BurnAddLine`` - - ``BurnAddWaterbody`` - - ``LandSea`` - - ``Sink`` - - ``Wall`` - - ``Catchment`` - - ``CatchmentSP`` - - ``NHDArea`` - - ``NHDWaterbody`` - - ``HUC12`` - - ``NHDPlusComponentVersions`` - - ``PlusARPointEvent`` - - ``PlusFlowAR`` - - ``NHDFCode`` - - ``DivFracMP`` - - ``BurnLineEvent`` - - ``NHDFlowline_Network`` - - ``NHDFlowline_NonNetwork`` - - ``GeoNetwork_Junctions`` - - ``PlusFlow`` - - ``N_1_Desc`` - - ``N_1_EDesc`` - - ``N_1_EStatus`` - - ``N_1_ETopo`` - - ``N_1_FloDir`` - - ``N_1_JDesc`` - - ``N_1_JStatus`` - - ``N_1_JTopo`` - - ``N_1_JTopo2`` - - ``N_1_Props`` - * **data_dire** (:class:`str` or :class:`pathlib.Pathlib.Path`) -- Directory to store the downloaded file and use in subsequent calls, - defaults to ``./cache``. - * **\*\*kwargs** -- Keyword arguments are passed to ``pyogrio.read_dataframe``. - For more information, visit - `pyogrio `__. - - :returns: :class:`geopandas.GeoDataFrame` -- A dataframe with all the NHDPlus data. - - -.. py:function:: prepare_nhdplus(flowlines, min_network_size, min_path_length, min_path_size = 0, purge_non_dendritic = False, remove_isolated = False, use_enhd_attrs = False, terminal2nan = True) - - Clean up and fix common issues of NHDPlus MR and HR flowlines. - - Ported from `nhdplusTools `__. - - :Parameters: * **flowlines** (:class:`geopandas.GeoDataFrame`) -- NHDPlus flowlines with at least the following columns: - ``comid``, ``lengthkm``, ``ftype``, ``terminalfl``, ``fromnode``, ``tonode``, - ``totdasqkm``, ``startflag``, ``streamorde``, ``streamcalc``, ``terminalpa``, - ``pathlength``, ``divergence``, ``hydroseq``, ``levelpathi``. 
- * **min_network_size** (:class:`float`) -- Minimum size of drainage network in sqkm - * **min_path_length** (:class:`float`) -- Minimum length of terminal level path of a network in km. - * **min_path_size** (:class:`float`, *optional*) -- Minimum size of outlet level path of a drainage basin in km. - Drainage basins with an outlet drainage area smaller than - this value will be removed. Defaults to 0. - * **purge_non_dendritic** (:class:`bool`, *optional*) -- Whether to remove non dendritic paths, defaults to ``False``. - * **remove_isolated** (:class:`bool`, *optional*) -- Whether to remove isolated flowlines, i.e., keep only the largest - connected component of the flowlines. Defaults to ``False``. - * **use_enhd_attrs** (:class:`bool`, *optional*) -- Whether to replace the attributes with the ENHD attributes, defaults - to ``False``. Note that this only works for NHDPlus mid-resolution (MR) and - does not work for NHDPlus high-resolution (HR). For more information, see - `this `__. - * **terminal2nan** (:class:`bool`, *optional*) -- Whether to replace the COMID of the terminal flowline of the network with NaN, - defaults to ``True``. If ``False``, the terminal COMID will be set from the - ENHD attributes i.e. ``use_enhd_attrs`` will be set to ``True`` which is only - applicable to NHDPlus mid-resolution (MR). - - :returns: :class:`geopandas.GeoDataFrame` -- Cleaned up flowlines. Note that all column names are converted to lower case. - - -.. py:function:: topoogical_sort(flowlines, edge_attr = None, largest_only = False, id_col = 'ID', toid_col = 'toID') - - Topological sorting of a river network. - - :Parameters: * **flowlines** (:class:`pandas.DataFrame`) -- A dataframe with columns ID and toID - * **edge_attr** (:class:`str` or :class:`list`, *optional*) -- Names of the columns in the dataframe to be used as edge attributes, defaults to None. 
- * **largest_only** (:class:`bool`, *optional*) -- Whether to return only the largest network, defaults to ``False``. - * **id_col** (:class:`str`, *optional*) -- Name of the column containing the node ID, defaults to ``ID``. - * **toid_col** (:class:`str`, *optional*) -- Name of the column containing the downstream node ID, defaults to ``toID``. - - :returns: :class:`(list`, dict , :class:`networkx.DiGraph)` -- A list of topologically sorted IDs, a dictionary - with keys as IDs and values as a list of its upstream nodes, - and the generated ``networkx.DiGraph`` object. Note that node - IDs are associated with the input flow line IDs, but there might - be some negative IDs in the output graph that are not present in - the input flow line IDs. These "artificial" nodes are used to represent the - graph outlet (the most downstream nodes) in the graph. - - -.. py:function:: vector_accumulation(flowlines, func, attr_col, arg_cols, id_col = 'comid', toid_col = 'tocomid') - - Flow accumulation using vector river network data. - - :Parameters: * **flowlines** (:class:`pandas.DataFrame`) -- A dataframe containing comid, tocomid, attr_col and all the columns - that are required for passing to ``func``. - * **func** (:class:`function`) -- The function that routes the flow in a single river segment. - Positions of the arguments in the function should be as follows: - ``func(qin, *arg_cols)`` - ``qin`` is computed in this function and the rest are in the order - of the ``arg_cols``. For example, if ``arg_cols = ["slope", "roughness"]`` - then the function is called this way: - ``func(qin, slope, roughness)`` - where slope and roughness are elemental values read from the flowlines. - * **attr_col** (:class:`str`) -- The column name of the attribute being accumulated in the network. - The column should contain the initial condition for the attribute for - each river segment. It can be a scalar or an array (e.g., time series).
- * **arg_cols** (:class:`list` of :class:`strs`) -- List of the flowlines columns that contain all the required - data for a routing a single river segment such as slope, length, - lateral flow, etc. - * **id_col** (:class:`str`, *optional*) -- Name of the flowlines column containing IDs, defaults to ``comid`` - * **toid_col** (:class:`str`, *optional*) -- Name of the flowlines column containing ``toIDs``, defaults to ``tocomid`` - - :returns: :class:`pandas.Series` -- Accumulated flow for all the nodes. The dataframe is sorted from upstream - to downstream (topological sorting). Depending on the given initial - condition in the ``attr_col``, the outflow for each river segment can be - a scalar or an array. - - diff --git a/docs/source/autoapi/pynhd/nhdplus_derived/index.rst b/docs/source/autoapi/pynhd/nhdplus_derived/index.rst deleted file mode 100644 index aaec8a7..0000000 --- a/docs/source/autoapi/pynhd/nhdplus_derived/index.rst +++ /dev/null @@ -1,247 +0,0 @@ -pynhd.nhdplus_derived -===================== - -.. py:module:: pynhd.nhdplus_derived - -.. autoapi-nested-parse:: - - Access NLDI and WaterData databases. - - - - - - - -Module Contents ---------------- - -.. py:class:: StreamCat(lakes_only = False) - - Get StreamCat API's properties. - - :Parameters: **lakes_only** (:class:`bool`, *optional*) -- If ``True``, only return metrics for lakes and their associated catchments - from the LakeCat dataset. - - .. attribute:: base_url - - The base URL of the API. - - :type: :class:`str` - - .. attribute:: valid_names - - The valid names of the metrics. - - :type: :class:`list` of :class:`str` - - .. attribute:: alt_names - - The alternative names of some metrics. - - :type: :class:`dict` of :class:`str` - - .. attribute:: valid_regions - - The valid hydro regions. - - :type: :class:`list` of :class:`str` - - .. attribute:: valid_states - - The valid two letter states' abbreviations. - - :type: :class:`pandas.DataFrame` - - .. 
attribute:: valid_counties - - The valid counties' FIPS codes. - - :type: :class:`pandas.DataFrame` - - .. attribute:: valid_aois - - The valid types of areas of interest. - - :type: :class:`list` of :class:`str` - - .. attribute:: metrics_df - - The metrics' metadata such as description and units. - - :type: :class:`pandas.DataFrame` - - .. attribute:: valid_years - - A dictionary of the valid years for annual metrics. - - :type: :class:`dict` - - -.. py:function:: enhd_attrs(parquet_path = None) - - Get updated NHDPlus attributes from ENHD V2.0. - - .. rubric:: Notes - - This function downloads a 160 MB ``parquet`` file from - `here `__. - Although this dataframe does not include geometry, it can be - linked to other geospatial NHDPlus dataframes through ComIDs. - - :Parameters: **parquet_path** (:class:`str` or :class:`pathlib.Pathlib.Path`, *optional*) -- Path to a file with ``.parquet`` extension for storing the file, - defaults to ``./cache/enhd_attrs.parquet``. - - :returns: :class:`pandas.DataFrame` -- A dataframe that includes ComID-level attributes for - 2.7 million NHDPlus flowlines. - - -.. py:function:: epa_nhd_catchments(comids, feature) - - Get NHDPlus catchment-scale data from EPA's HMS REST API. - - .. rubric:: Notes - - For more information about curve number please refer to the project's - webpage on the EPA's - `website `__. - - :Parameters: * **comids** (:class:`int` or :class:`list` of :class:`int`) -- ComID(s) of NHDPlus catchments. - * **feature** (:class:`str`) -- The feature of interest. Available options are: - - - ``curve_number``: 16-day average Curve Number. - - ``comid_info``: ComID information. - - :returns: :class:`dict` of :class:`pandas.DataFrame` or :class:`geopandas.GeoDataFrame` -- A dict of the requested dataframes. A ``comid_info`` dataframe is - always returned. - - .. 
rubric:: Examples - - >>> import pynhd - >>> data = pynhd.epa_nhd_catchments(9533477, "curve_number") - >>> data["curve_number"].mean(axis=1).item() - 75.576 - - -.. py:function:: nhd_fcode() - - Get all the NHDPlus FCodes. - - -.. py:function:: nhdplus_attrs(attr_name = None) - - Stage the NHDPlus Attributes database and save to nhdplus_attrs.parquet. - - .. rubric:: Notes - - More info can be found `here `__. - - :Parameters: **attr_name** (:class:`str`, *optional*) -- Name of NHDPlus attribute to return, defaults to None, i.e., - only return a metadata dataframe that includes the attribute names - and their description and units. - - :returns: :class:`pandas.DataFrame` -- The staged data as a DataFrame. - - -.. py:function:: nhdplus_attrs_s3(attr_names = None, pyarrow_filter = None, nodata = False) - - Access NHDPlus V2.1 derived attributes over CONUS. - - .. rubric:: Notes - - More info can be found `here `__. - - :Parameters: * **attr_names** (:class:`str` or :class:`list` of :class:`str`, *optional*) -- Names of NHDPlus attribute(s) to return, defaults to None, i.e., - only return a metadata dataframe that includes the attribute names - and their description and units. - * **pyarrow_filter** (:class:`pyarrow.compute.Expression`, *optional*) -- A filter expression to apply to the dataset, defaults to None. Please - refer to the PyArrow documentation for more information - `here `__. - * **nodata** (:class:`bool`) -- Whether to include NODATA percentages, default is False. - - :returns: :class:`pandas.DataFrame` -- A dataframe of requested NHDPlus attributes. - - -.. py:function:: nhdplus_h12pp(gpkg_path = None) - - Access HUC12 Pour Points for NHDPlus V2.1 L48 (CONUS). - - .. rubric:: Notes - - More info can be found - `here `__. - - :Parameters: **gpkg_path** (:class:`str` or :class:`pathlib.Pathlib.Path`, *optional*) -- Path to the geopackage file, defaults to None, i.e., download - the file to the cache directory as ``102020wbd_outlets.gpkg``.
- - :returns: :class:`geopandas.GeoDataFrame` -- A geodataframe of HUC12 pour points. - - -.. py:function:: nhdplus_vaa(parquet_path = None) - - Get NHDPlus Value Added Attributes including roughness. - - .. rubric:: Notes - - This function downloads a 245 MB ``parquet`` file from - `here `__. - Although this dataframe does not include geometry, it can be linked - to other geospatial NHDPlus dataframes through ComIDs. - - :Parameters: **parquet_path** (:class:`str` or :class:`pathlib.Pathlib.Path`, *optional*) -- Path to a file with ``.parquet`` extension for storing the file, defaults to - ``./cache/nldplus_vaa.parquet``. - - :returns: :class:`pandas.DataFrame` -- A dataframe that includes ComID-level attributes for 2.7 million - NHDPlus flowlines. - - -.. py:function:: streamcat(metric_names = None, metric_areas = None, comids = None, regions = None, states = None, counties = None, conus = False, percent_full = False, area_sqkm = False, lakes_only = False) - - Get various metrics for NHDPlusV2 catchments from EPA's StreamCat. - - .. rubric:: Notes - - For more information about the service check its webpage - at https://www.epa.gov/national-aquatic-resource-surveys/streamcat-dataset. - - :Parameters: * **metric_names** (:class:`str` or :class:`list` of :class:`str`, *optional*) -- Metric name(s) to retrieve. There are 567 metrics available. - to get a full list check out :meth:`StreamCat.valid_names`. - To get a description of each metric, check out - :meth:`StreamCat.metrics_df`. Some metrics require year and/or slope - to be specified, which have ``[Year]`` and/or ``[Slope]`` in their name. - For convenience all these variables and their years/slopes are converted - to a dict that can be accessed via :meth:`StreamCat.valid_years` and - :meth:`StreamCat.valid_slopes`. Defaults to ``None``, which will return - a dataframe of the metrics metadata. 
- * **metric_areas** (:class:`str` or :class:`list` of :class:`str`, *optional*) -- Areas to return the metrics for, defaults to ``None``, i.e. all areas. - Valid options are: ``cat`` for catchment, ``catrp100`` for 100-m riparian - catchment, ``ws`` for watershed, ``wsrp100`` for 100-m riparian watershed, - * **comids** (:class:`int` or :class:`list` of :class:`int`, *optional*) -- NHDPlus COMID(s), defaults to ``None``. Either ``comids``, ``regions``, - ``states``, ``counties``, or ``conus`` must be passed. They are - mutually exclusive. - * **regions** (:class:`str` or :class:`list` of :class:`str`, *optional*) -- Hydro region(s) to retrieve metrics for, defaults to ``None``. For a - full list of valid regions check out :meth:`StreamCat.valid_regions` - Either ``comids``, ``regions``, ``states``, ``counties``, or ``conus`` - must be passed. They are mutually exclusive. - * **states** (:class:`str` or :class:`list` of :class:`str`, *optional*) -- Two letter state abbreviation(s) to retrieve metrics for, defaults to - ``None``. For a full list of valid states check out - :meth:`StreamCat.valid_states` Either ``comids``, ``regions``, - ``states``, ``counties``, or ``conus`` must be passed. They are - mutually exclusive. - * **counties** (:class:`str` or :class:`list` of :class:`str`, *optional*) -- County FIPS codes(s) to retrieve metrics for, defaults to ``None``. For - a full list of valid county codes check out :meth:`StreamCat.valid_counties` - Either ``comids``, ``regions``, ``states``, ``counties``, or ``conus`` must - be passed. They are mutually exclusive. - * **conus** (:class:`bool`, *optional*) -- If ``True``, ``metric_names`` of all NHDPlus COMIDs are retrieved, - defaults ``False``. Either ``comids``, ``regions``, - ``states``, ``counties``, or ``conus`` must be passed. They are mutually - exclusive. - * **percent_full** (:class:`bool`, *optional*) -- If ``True``, return the percent of each area of interest covered by - the metric. 
- * **area_sqkm** (:class:`bool`, *optional*) -- If ``True``, return the area in square kilometers. - * **lakes_only** (:class:`bool`, *optional*) -- If ``True``, only return metrics for lakes and their associated catchments - from the LakeCat dataset. - - :returns: :class:`pandas.DataFrame` -- A dataframe with the requested metrics. - - diff --git a/docs/source/autoapi/pynhd/pynhd/index.rst b/docs/source/autoapi/pynhd/pynhd/index.rst deleted file mode 100644 index 1da5882..0000000 --- a/docs/source/autoapi/pynhd/pynhd/index.rst +++ /dev/null @@ -1,570 +0,0 @@ -pynhd.pynhd -=========== - -.. py:module:: pynhd.pynhd - -.. autoapi-nested-parse:: - - Access NLDI and WaterData databases. - - - - - - - -Module Contents ---------------- - -.. py:class:: HP3D(layer, outfields = '*', crs = 4326) - - - - Access USGS 3D Hydrography Program (3DHP) service. - - .. rubric:: Notes - - For more info visit: https://hydro.nationalmap.gov/arcgis/rest/services/3DHP_all/MapServer - - :Parameters: * **layer** (:class:`str`, *optional*) -- A valid service layer. Layer names with ``_hr`` are high resolution and - ``_mr`` are medium resolution. Also, layer names with ``_nonconus`` are for - non-conus areas, i.e., Alaska, Hawaii, Puerto Rico, the Virgin Islands , and - the Pacific Islands. Valid layers are: - - - ``hydrolocation_waterbody`` for Sink, Spring, Waterbody Outlet - - ``hydrolocation_flowline`` for Headwater, Terminus, Divergence, Confluence, Catchment Outlet - - ``hydrolocation_reach`` for Reach Code, External Connection - - ``flowline`` for river flowlines - - ``waterbody`` for waterbodies - - ``drainage_area`` for drainage areas - - ``catchment`` for catchments - * **outfields** (:class:`str` or :class:`list`, *optional*) -- Target field name(s), default to "*" i.e., all the fields. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- Target spatial reference, default to ``EPSG:4326``. - - .. 
method:: bygeom(geom, geo_crs=4326, sql_clause="", distance=None, return_m=False, return_geom=True) - - Get features within a geometry that can be combined with a SQL where clause. - - .. method:: byids(field, fids, return_m=False, return_geom=True) - - Get features by object IDs. - - .. method:: bysql(sql_clause, return_m=False, return_geom=True) - - Get features using a valid SQL 92 WHERE clause. - - - -.. py:class:: NHD(layer, outfields = '*', crs = 4326) - - - - Access National Hydrography Dataset (NHD), both meduim and high resolution. - - .. rubric:: Notes - - For more info visit: https://hydro.nationalmap.gov/arcgis/rest/services/nhd/MapServer - - :Parameters: * **layer** (:class:`str`, *optional*) -- A valid service layer. Layer names with ``_hr`` are high resolution and - ``_mr`` are medium resolution. Also, layer names with ``_nonconus`` are for - non-conus areas, i.e., Alaska, Hawaii, Puerto Rico, the Virgin Islands , and - the Pacific Islands. Valid layers are: - - - ``point`` - - ``point_event`` - - ``line_hr`` - - ``flow_direction`` - - ``flowline_mr`` - - ``flowline_hr_nonconus`` - - ``flowline_hr`` - - ``area_mr`` - - ``area_hr_nonconus`` - - ``area_hr`` - - ``waterbody_mr`` - - ``waterbody_hr_nonconus`` - - ``waterbody_hr`` - * **outfields** (:class:`str` or :class:`list`, *optional*) -- Target field name(s), default to "*" i.e., all the fields. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- Target spatial reference, default to ``EPSG:4326``. - - .. method:: bygeom(geom, geo_crs=4326, sql_clause="", distance=None, return_m=False, return_geom=True) - - Get features within a geometry that can be combined with a SQL where clause. - - .. method:: byids(field, fids, return_m=False, return_geom=True) - - Get features by object IDs. - - .. method:: bysql(sql_clause, return_m=False, return_geom=True) - - Get features using a valid SQL 92 WHERE clause. - - - -.. 
py:class:: NHDPlusHR(layer, outfields = '*', crs = 4326) - - - - Access National Hydrography Dataset (NHD) Plus high resolution. - - .. rubric:: Notes - - For more info visit: https://hydro.nationalmap.gov/arcgis/rest/services/NHDPlus_HR/MapServer - - :Parameters: * **layer** (:class:`str`, *optional*) -- A valid service layer. Valid layers are: - - - ``gage`` for NHDPlusGage layer - - ``sink`` for NHDPlusSink layer - - ``point`` for NHDPoint layer - - ``flowline`` for NetworkNHDFlowline layer - - ``non_network_flowline`` for NonNetworkNHDFlowline layer - - ``flow_direction`` for FlowDirection layer - - ``wall`` for NHDPlusWall layer - - ``line`` for NHDLine layer - - ``area`` for NHDArea layer - - ``waterbody`` for NHDWaterbody layer - - ``catchment`` for NHDPlusCatchment layer - - ``boundary_unit`` for NHDPlusBoundaryUnit layer - - ``huc12`` for WBDHU12 layer - * **outfields** (:class:`str` or :class:`list`, *optional*) -- Target field name(s), default to "*" i.e., all the fields. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- Target spatial reference, default to ``EPSG:4326``. - - .. method:: bygeom(geom, geo_crs=4326, sql_clause="", distance=None, return_m=False, return_geom=True) - - Get features within a geometry that can be combined with a SQL where clause. - - .. method:: byids(field, fids, return_m=False, return_geom=True) - - Get features by object IDs. - - .. method:: bysql(sql_clause, return_m=False, return_geom=True) - - Get features using a valid SQL 92 WHERE clause. - - - -.. py:class:: NLDI - - Access the Hydro Network-Linked Data Index (NLDI) service. - - - .. py:method:: comid_byloc(coords, loc_crs = 4326) - - Get the closest ComID based on coordinates using ``hydrolocation`` endpoint. - - .. rubric:: Notes - - This function tries to find the closest ComID based on flowline grid cells. 
If - such a cell is not found, it will return the closest ComID using the flowtrace - endpoint of the PyGeoAPI service to find the closest downstream ComID. The returned - dataframe has a ``measure`` column that indicates the location of the input - coordinate on the flowline as a percentage of the total flowline length. - - :Parameters: * **coords** (:class:`tuple` or :class:`list` of :class:`tuples`) -- A tuple of length two (x, y) or a list of them. - * **loc_crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The spatial reference of the input coordinate, defaults to EPSG:4326. - - :returns: :class:`geopandas.GeoDataFrame` or :class:`(geopandas.GeoDataFrame`, :class:`list)` -- NLDI indexed ComID(s) and points in EPSG:4326. If some coords don't return - any ComID a list of missing coords are returned as well. - - - - .. py:method:: feature_byloc(coords, loc_crs = 4326) - - Get the closest feature ID(s) based on coordinates using ``position`` endpoint. - - :Parameters: * **coords** (:class:`tuple` or :class:`list`) -- A tuple of length two (x, y) or a list of them. - * **loc_crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The spatial reference of the input coordinate, defaults to EPSG:4326. - - :returns: :class:`geopandas.GeoDataFrame` or :class:`(geopandas.GeoDataFrame`, :class:`list)` -- NLDI indexed feature ID(s) and flowlines in EPSG:4326. If some coords don't - return any IDs a list of missing coords are returned as well. - - - - .. py:method:: get_basins(feature_ids, fsource = 'nwissite', split_catchment = False, simplified = True) - - Get basins for a list of station IDs. - - :Parameters: * **feature_ids** (:class:`str` or :class:`list`) -- Target feature ID(s). - * **fsource** (:class:`str`) -- The name of feature(s) source, defaults to ``nwissite``. - The valid sources are: - - * 'comid' for NHDPlus comid. 
- * 'ca_gages' for Streamgage catalog for CA SB19 - * 'gfv11_pois' for USGS Geospatial Fabric V1.1 Points of Interest - * 'huc12pp' for HUC12 Pour Points - * 'nmwdi-st' for New Mexico Water Data Initiative Sites - * 'nwisgw' for NWIS Groundwater Sites - * 'nwissite' for NWIS Surface Water Sites - * 'ref_gage' for geoconnex.us reference gages - * 'vigil' for Vigil Network Data - * 'wade' for Water Data Exchange 2.0 Sites - * 'WQP' for Water Quality Portal - * **split_catchment** (:class:`bool`, *optional*) -- If ``True``, split basins at their outlet locations. Default to ``False``. - * **simplified** (:class:`bool`, *optional*) -- If ``True``, return a simplified version of basin geometries. Default to ``True``. - - :returns: :class:`geopandas.GeoDataFrame` or :class:`(geopandas.GeoDataFrame`, :class:`list)` -- NLDI indexed basins in EPSG:4326. If some IDs don't return any features - a list of missing ID(s) is returned as well. - - - - .. py:method:: get_characteristics(char_list, comids = None) - :staticmethod: - - - Get characteristics using a list of ComIDs. - - :Parameters: * **char_list** (:class:`str` or :class:`list`) -- The list of characteristics to get. - * **comids** (:class:`int` or :class:`list`, *optional*) -- The list of ComIDs, defaults to None, i.e., all NHDPlus ComIDs. - - :returns: :class:`pandas.DataFrame` -- The characteristics of the requested ComIDs. - - - - .. py:method:: getfeature_byid(fsource, fids) - - Get feature(s) based on ID(s). - - :Parameters: * **fsource** (:class:`str`) -- The name of feature(s) source. The valid sources are: - - * 'comid' for NHDPlus comid. 
- * 'ca_gages' for Streamgage catalog for CA SB19 - * 'gfv11_pois' for USGS Geospatial Fabric V1.1 Points of Interest - * 'huc12pp' for HUC12 Pour Points - * 'nmwdi-st' for New Mexico Water Data Initiative Sites - * 'nwisgw' for NWIS Groundwater Sites - * 'nwissite' for NWIS Surface Water Sites - * 'ref_gage' for geoconnex.us reference gages - * 'vigil' for Vigil Network Data - * 'wade' for Water Data Exchange 2.0 Sites - * 'WQP' for Water Quality Portal - * **fid** (:class:`str` or :class:`list` of :class:`str`) -- Feature ID(s). - - :returns: :class:`geopandas.GeoDataFrame` or :class:`(geopandas.GeoDataFrame`, :class:`list)` -- NLDI indexed features in EPSG:4326. If some IDs don't return any features - a list of missing ID(s) are returned as well. - - - - .. py:method:: navigate_byid(fsource, fid, navigation, source, distance = 500, trim_start = False, stop_comid = None) - - Navigate the NHDPlus database from a single feature id up to a distance. - - :Parameters: * **fsource** (:class:`str`) -- The name of feature(s) source. The valid sources are: - - * 'comid' for NHDPlus comid. - * 'ca_gages' for Streamgage catalog for CA SB19 - * 'gfv11_pois' for USGS Geospatial Fabric V1.1 Points of Interest - * 'huc12pp' for HUC12 Pour Points - * 'nmwdi-st' for New Mexico Water Data Initiative Sites - * 'nwisgw' for NWIS Groundwater Sites - * 'nwissite' for NWIS Surface Water Sites - * 'ref_gage' for geoconnex.us reference gages - * 'vigil' for Vigil Network Data - * 'wade' for Water Data Exchange 2.0 Sites - * 'WQP' for Water Quality Portal - * **fid** (:class:`str` or :class:`int`) -- The ID of the feature. - * **navigation** (:class:`str`) -- The navigation method. - * **source** (:class:`str`) -- Return the data from another source after navigating - features from ``fsource``. - * **distance** (:class:`int`, *optional*) -- Limit the search for navigation up to a distance in km, - defaults is 500 km. 
Note that this is an expensive request so you - have to be mindful of the value that you provide. The value must be - between 1 and 9999 km. - * **trim_start** (:class:`bool`, *optional*) -- If ``True``, trim the starting flowline at the source feature, - defaults to ``False``. - * **stop_comid** (:class:`str` or :class:`int`, *optional*) -- The ComID to stop the navigation, defaults to ``None``. - - :returns: :class:`geopandas.GeoDataFrame` -- NLDI indexed features in EPSG:4326. - - - - .. py:method:: navigate_byloc(coords, navigation = None, source = None, loc_crs = 4326, distance = 500, trim_start = False, stop_comid = None) - - Navigate the NHDPlus database from a coordinate. - - .. rubric:: Notes - - This function first calls the ``feature_byloc`` function to get the - comid of the nearest flowline and then calls the ``navigate_byid`` - function to get the features from the obtained ``comid``. - - :Parameters: * **coords** (:class:`tuple`) -- A tuple of length two (x, y). - * **navigation** (:class:`str`, *optional*) -- The navigation method, defaults to None which throws an exception - if ``comid_only`` is False. - * **source** (:class:`str`, *optional*) -- Return the data from another source after navigating - the features based on ``comid``, defaults to ``None`` which throws - an exception if ``comid_only`` is False. - * **loc_crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The spatial reference of the input coordinate, defaults to EPSG:4326. - * **distance** (:class:`int`, *optional*) -- Limit the search for navigation up to a distance in km, - defaults to 500 km. Note that this is an expensive request so you - have to be mindful of the value that you provide. - * **trim_start** (:class:`bool`, *optional*) -- If ``True``, trim the starting flowline at the source feature, - defaults to ``False``. - * **stop_comid** (:class:`str` or :class:`int`, *optional*) -- The ComID to stop the navigation, defaults to ``None``. 
- - :returns: :class:`geopandas.GeoDataFrame` -- NLDI indexed features in EPSG:4326. - - - -.. py:class:: PyGeoAPI - - - - Access `PyGeoAPI `__ service. - - - .. py:method:: cross_section(coord, width, numpts, crs = 4326) - - Return a GeoDataFrame from the xsatpoint service. - - :Parameters: * **coord** (:class:`tuple`) -- The coordinate of the point to extract the cross-section as a tuple,e.g., (lon, lat). - * **width** (:class:`float`) -- The width of the cross-section in meters. - * **numpts** (:class:`int`) -- The number of points to extract the cross-section from the DEM. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The coordinate reference system of the coordinates, defaults to EPSG:4326. - - :returns: :class:`geopandas.GeoDataFrame` -- A GeoDataFrame containing the cross-section at the requested point. - - .. rubric:: Examples - - >>> from pynhd import PyGeoAPI - >>> pga = PyGeoAPI() - >>> gdf = pga.cross_section((-103.80119, 40.2684), width=1000.0, numpts=101, crs=4326) # doctest: +SKIP - >>> print(gdf.iloc[-1, 1]) # doctest: +SKIP - 1000.0 - - - - .. py:method:: elevation_profile(line, numpts, dem_res, crs = 4326) - - Return a GeoDataFrame from the xsatpathpts service. - - :Parameters: * **line** (:class:`shapely.LineString` or :class:`shapely.MultiLineString`) -- The line to extract the elevation profile for. - * **numpts** (:class:`int`) -- The number of points to extract the elevation profile from the DEM. - * **dem_res** (:class:`int`) -- The target resolution for requesting the DEM from 3DEP service. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The coordinate reference system of the coordinates, defaults to EPSG:4326. - - :returns: :class:`geopandas.GeoDataFrame` -- A GeoDataFrame containing the elevation profile along the requested endpoints. - - .. 
rubric:: Examples - - >>> from pynhd import PyGeoAPI - >>> from shapely import LineString - >>> pga = PyGeoAPI() - >>> line = LineString([(-103.801086, 40.26772), (-103.80097, 40.270568)]) - >>> gdf = pga.elevation_profile(line, 101, 1, 4326) # doctest: +SKIP - >>> print(gdf.iloc[-1, 2]) # doctest: +SKIP - 1299.8727 - - - - .. py:method:: endpoints_profile(coords, numpts, dem_res, crs = 4326) - - Return a GeoDataFrame from the xsatendpts service. - - :Parameters: * **coords** (:class:`list`) -- A list of two coordinates to trace as a list of tuples, e.g., - [(x1, y1), (x2, y2)]. - * **numpts** (:class:`int`) -- The number of points to extract the elevation profile from the DEM. - * **dem_res** (:class:`int`) -- The target resolution for requesting the DEM from 3DEP service. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The coordinate reference system of the coordinates, defaults to EPSG:4326. - - :returns: :class:`geopandas.GeoDataFrame` -- A GeoDataFrame containing the elevation profile along the requested endpoints. - - .. rubric:: Examples - - >>> from pynhd import PyGeoAPI - >>> pga = PyGeoAPI() - >>> gdf = pga.endpoints_profile( - ... [(-103.801086, 40.26772), (-103.80097, 40.270568)], numpts=101, dem_res=1, crs=4326 - ... ) # doctest: +SKIP - >>> print(gdf.iloc[-1, 1]) # doctest: +SKIP - 411.5906 - - - - .. py:method:: flow_trace(coord, crs = 4326, direction = 'none') - - Return a GeoDataFrame from the flowtrace service. - - :Parameters: * **coord** (:class:`tuple`) -- The coordinate of the point to trace as a tuple,e.g., (lon, lat). - * **crs** (:class:`str`) -- The coordinate reference system of the coordinates, defaults to EPSG:4326. - * **direction** (:class:`str`, *optional*) -- The direction of flowpaths, either ``down``, ``up``, or ``none``. - Defaults to ``none``. - - :returns: :class:`geopandas.GeoDataFrame` -- A GeoDataFrame containing the traced flowline. - - .. 
rubric:: Examples - - >>> from pynhd import PyGeoAPI - >>> pga = PyGeoAPI() - >>> gdf = pga.flow_trace( - ... (1774209.63, 856381.68), crs="ESRI:102003", direction="none" - ... ) # doctest: +SKIP - >>> print(gdf.comid.iloc[0]) # doctest: +SKIP - 22294818 - - - - .. py:method:: split_catchment(coord, crs = 4326, upstream = False) - - Return a GeoDataFrame from the splitcatchment service. - - :Parameters: * **coord** (:class:`tuple`) -- The coordinate of the point to trace as a tuple,e.g., (lon, lat). - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The coordinate reference system of the coordinates, defaults to EPSG:4326. - * **upstream** (:class:`bool`, *optional*) -- If True, return all upstream catchments rather than just the local catchment, - defaults to False. - - :returns: :class:`geopandas.GeoDataFrame` -- A GeoDataFrame containing the local catchment or the entire upstream catchments. - - .. rubric:: Examples - - >>> from pynhd import PyGeoAPI - >>> pga = PyGeoAPI() - >>> gdf = pga.split_catchment((-73.82705, 43.29139), crs=4326, upstream=False) # doctest: +SKIP - >>> print(gdf.catchmentID.iloc[0]) # doctest: +SKIP - 22294818 - - - -.. py:class:: WaterData(layer, crs = 4326) - - Access `WaterData `__ service. - - :Parameters: * **layer** (:class:`str`) -- A valid layer from the WaterData service. Valid layers are: - - - ``catchmentsp`` - - ``gagesii`` - - ``gagesii_basins`` - - ``nhdarea`` - - ``nhdflowline_network`` - - ``nhdflowline_nonnetwork`` - - ``nhdwaterbody`` - - ``wbd02`` - - ``wbd04`` - - ``wbd06`` - - ``wbd08`` - - ``wbd10`` - - ``wbd12`` - - Note that all ``wbd*`` layers provide access to the October 2020 - snapshot of the Watershed Boundary Dataset (WBD). If you need the - latest version, please use the ``WBD`` class from the - `PyGeoHydro `__ - package. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The target spatial reference system, defaults to ``epsg:4326``. - - - .. 
py:method:: bybox(bbox, box_crs = 4326, sort_attr = None) - - Get features within a bounding box. - - :Parameters: * **bbox** (:class:`tuple` of :class:`floats`) -- A bounding box in the form of (minx, miny, maxx, maxy). - * **box_crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The spatial reference system of the bounding box, defaults to ``epsg:4326``. - * **sort_attr** (:class:`str`, *optional*) -- The column name in the database to sort request by, defaults - to the first attribute in the schema that contains ``id`` in its name. - - :returns: :class:`geopandas.GeoDataFrame` -- The requested features in a GeoDataFrame. - - - - .. py:method:: bydistance(coords, distance, loc_crs = 4326, sort_attr = None) - - Get features within a radius (in meters) of a point. - - :Parameters: * **coords** (:class:`tuple` of :class:`float`) -- The x, y coordinates of the point. - * **distance** (:class:`int`) -- The radius (in meters) to search within. - * **loc_crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The CRS of the input coordinates, default to ``epsg:4326``. - * **sort_attr** (:class:`str`, *optional*) -- The column name in the database to sort request by, defaults - to the first attribute in the schema that contains ``id`` in its name. - - :returns: :class:`geopandas.GeoDataFrame` -- Requested features as a GeoDataFrame. - - - - .. py:method:: byfilter(cql_filter, method = 'GET', sort_attr = None) - - Get features based on a CQL filter. - - :Parameters: * **cql_filter** (:class:`str`) -- The CQL filter to use for requesting the data. - * **method** (:class:`str`, *optional*) -- The HTTP method to use for requesting the data, defaults to GET. - Allowed methods are GET and POST. - * **sort_attr** (:class:`str`, *optional*) -- The column name in the database to sort request by, defaults - to the first attribute in the schema that contains ``id`` in its name. 
- - :returns: :class:`geopandas.GeoDataFrame` -- The requested features as a GeoDataFrame. - - - - .. py:method:: bygeom(geometry, geo_crs = 4326, xy = True, predicate = 'intersects', sort_attr = None) - - Get features within a geometry. - - :Parameters: * **geometry** (:class:`shapely.Polygon` or :class:`shapely.MultiPolygon`) -- The input (multi)polygon to request the data. - * **geo_crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The CRS of the input geometry, default to epsg:4326. - * **xy** (:class:`bool`, *optional*) -- Whether axis order of the input geometry is xy or yx. - * **predicate** (:class:`str`, *optional*) -- The geometric predicate to use for requesting the data, defaults to - INTERSECTS. Valid predicates are: - - - ``equals`` - - ``disjoint`` - - ``intersects`` - - ``touches`` - - ``crosses`` - - ``within`` - - ``contains`` - - ``overlaps`` - - ``relate`` - - ``beyond`` - * **sort_attr** (:class:`str`, *optional*) -- The column name in the database to sort request by, defaults - to the first attribute in the schema that contains ``id`` in its name. - - :returns: :class:`geopandas.GeoDataFrame` -- The requested features in the given geometry. - - - - .. py:method:: byid(featurename, featureids) - - Get features based on IDs. - - - -.. py:function:: pygeoapi(geodf, service) - - Return a GeoDataFrame from the flowtrace service. - - :Parameters: * **geodf** (:class:`geopandas.GeoDataFrame`) -- A GeoDataFrame containing geometries to query. - The required columns for each service are: - - * ``flow_trace``: ``direction`` that indicates the direction of the flow trace. - It can be ``up``, ``down``, or ``none`` (both directions). - * ``split_catchment``: ``upstream`` that indicates whether to return all upstream - catchments or just the local catchment. 
- * ``elevation_profile``: ``numpts`` that indicates the number of points to extract - along the flowpath and ``3dep_res`` that indicates the target resolution for - requesting the DEM from 3DEP service. - * ``endpoints_profile``: ``numpts`` that indicates the number of points to extract - along the flowpath and ``3dep_res`` that indicates the target resolution for - requesting the DEM from 3DEP service. - * ``cross_section``: ``numpts`` that indicates the number of points to extract - along the flowpath and ``width`` that indicates the width of the cross-section - in meters. - * **service** (:class:`str`) -- The service to query, can be ``flow_trace``, ``split_catchment``, ``elevation_profile``, - ``endpoints_profile``, or ``cross_section``. - - :returns: :class:`geopandas.GeoDataFrame` -- A GeoDataFrame containing the results of requested service. - - .. rubric:: Examples - - >>> from shapely import Point - >>> import geopandas as gpd - >>> gdf = gpd.GeoDataFrame( - ... { - ... "direction": [ - ... "none", - ... ] - ... }, - ... geometry=[Point((1774209.63, 856381.68))], - ... crs="ESRI:102003", - ... ) - >>> trace = nhd.pygeoapi(gdf, "flow_trace") - >>> print(trace.comid.iloc[0]) - 22294818 - - diff --git a/docs/source/autoapi/pynldas2/index.rst b/docs/source/autoapi/pynldas2/index.rst deleted file mode 100644 index d9852c9..0000000 --- a/docs/source/autoapi/pynldas2/index.rst +++ /dev/null @@ -1,20 +0,0 @@ -pynldas2 -======== - -.. py:module:: pynldas2 - -.. autoapi-nested-parse:: - - Top-level package. - - - -Submodules ----------- - -.. toctree:: - :maxdepth: 1 - - /autoapi/pynldas2/pynldas2/index - - diff --git a/docs/source/autoapi/pynldas2/pynldas2/index.rst b/docs/source/autoapi/pynldas2/pynldas2/index.rst deleted file mode 100644 index 86ce05a..0000000 --- a/docs/source/autoapi/pynldas2/pynldas2/index.rst +++ /dev/null @@ -1,62 +0,0 @@ -pynldas2.pynldas2 -================= - -.. py:module:: pynldas2.pynldas2 - -.. 
autoapi-nested-parse:: - - Get hourly NLDAS2 forcing data. - - - - - -Module Contents ---------------- - -.. py:function:: get_bycoords(coords, start_date, end_date, coords_id = None, crs = 4326, variables = None, to_xarray = False, snow = False, snow_params = None) - - Get NLDAS-2 climate forcing data for a list of coordinates. - - :Parameters: * **coords** (:class:`list` of :class:`tuples`) -- List of (lon, lat) coordinates. - * **start_date** (:class:`str`) -- Start date of the data. - * **end_date** (:class:`str`) -- End date of the data. - * **crs** (:class:`str`, :class:`int`, or :class:`pyproj.CRS`, *optional*) -- The CRS of the input coordinates, defaults to ``EPSG:4326``. - * **variables** (:class:`str` or :class:`list` of :class:`str`, *optional*) -- Variables to download. If None, all variables are downloaded. - Valid variables are: ``prcp``, ``pet``, ``temp``, ``wind_u``, ``wind_v``, - ``rlds``, ``rsds``, and ``humidity`` and ``psurf``. - * **to_xarray** (:class:`bool`, *optional*) -- If True, the data is returned as an xarray dataset. - * **snow** (:class:`bool`, *optional*) -- Compute snowfall from precipitation and temperature. Defaults to ``False``. - * **snow_params** (:class:`dict`, *optional*) -- Model-specific parameters as a dictionary that is passed to the snowfall function. - These parameters are only used if ``snow`` is ``True``. Two parameters are required: - ``t_rain`` (deg C) which is the threshold for temperature for considering rain and - ``t_snow`` (deg C) which is the threshold for temperature for considering snow. - The default values are ``{'t_rain': 2.5, 't_snow': 0.6}`` that are adopted from - https://doi.org/10.5194/gmd-11-1077-2018. - - :returns: :class:`pandas.DataFrame` -- The requested data as a dataframe. - - -.. py:function:: get_bygeom(geometry, start_date, end_date, geo_crs = 4326, variables = None, snow = False, snow_params = None) - - Get hourly NLDAS-2 climate forcing within a geometry at 0.125 resolution. 
- - :Parameters: * **geometry** (:class:`Polygon` or :class:`tuple`) -- The geometry of the region of interest. It can be a shapely Polygon or a tuple - of length 4 representing the bounding box (minx, miny, maxx, maxy). - * **start_date** (:class:`str`) -- Start date of the data. - * **end_date** (:class:`str`) -- End date of the data. - * **geo_crs** (:class:`int`, :class:`str`, or :class:`pyproj.CRS`) -- CRS of the input geometry - * **variables** (:class:`str` or :class:`list` of :class:`str`, *optional*) -- Variables to download. If None, all variables are downloaded. - Valid variables are: ``prcp``, ``pet``, ``temp``, ``wind_u``, ``wind_v``, - ``rlds``, ``rsds``, and ``humidity`` and ``psurf``. - * **snow** (:class:`bool`, *optional*) -- Compute snowfall from precipitation and temperature. Defaults to ``False``. - * **snow_params** (:class:`dict`, *optional*) -- Model-specific parameters as a dictionary that is passed to the snowfall function. - These parameters are only used if ``snow`` is ``True``. Two parameters are required: - ``t_rain`` (deg C) which is the threshold for temperature for considering rain and - ``t_snow`` (deg C) which is the threshold for temperature for considering snow. - The default values are ``{'t_rain': 2.5, 't_snow': 0.6}`` that are adopted from - https://doi.org/10.5194/gmd-11-1077-2018. - - :returns: :class:`xarray.Dataset` -- The requested forcing data. 
- - diff --git a/docs/source/conf.py b/docs/source/conf.py index 96a90ae..d2c438b 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -105,7 +105,7 @@ ] autoapi_options = ["members"] autoapi_member_order = "groupwise" -autoapi_keep_files = True +autoapi_keep_files = False autoapi_add_toctree_entry = False modindex_common_prefix = [ "pynhd.", @@ -207,7 +207,6 @@ # -- Options for HTML output ------------------------------------------------- html_static_path = ["_static"] -# html_css_files = ["style.css"] today_fmt = "%Y-%m-%d" pygments_style = "sphinx" @@ -215,7 +214,6 @@ # sphinx_book_theme configurations html_theme = "pydata_sphinx_theme" -# html_theme = "sphinx_book_theme" html_title = "" # logo @@ -230,21 +228,6 @@ "doc_path": "docs", } html_baseurl = "https://docs.hyriver.io" -# html_theme_options = { -# "repository_url": "https://github.com/hyriver/hyriver.github.io", -# "repository_branch": "main", -# "path_to_docs": "docs", -# "launch_buttons": { -# "binderhub_url": "https://mybinder.org/v2/gh/hyriver/HyRiver-examples/main?urlpath=lab/tree/notebooks", -# "notebook_interface": "jupyterlab", -# }, -# "use_edit_page_button": True, -# "use_repository_button": True, -# "use_download_button": False, -# "use_issues_button": True, -# "home_page_in_toc": True, -# "navigation_with_keys": False, -# } html_theme_options = { "header_links_before_dropdown": 4, @@ -260,26 +243,14 @@ "icon": "fa-brands fa-github", }, ], - # alternative way to set twitter and github header icons - # "github_url": "https://github.com/pydata/pydata-sphinx-theme", - # "twitter_url": "https://twitter.com/PyData", "logo": { "text": "", "image_dark": "_static/hyriver_logo_text.svg", }, "use_edit_page_button": True, "show_toc_level": 1, - "navbar_align": "left", # [left, content, right] For testing that the navbar items align properly - # "show_nav_level": 2, - # "announcement": 
"https://raw.githubusercontent.com/pydata/pydata-sphinx-theme/main/docs/_templates/custom-template.html", - # "show_version_warning_banner": True, + "navbar_align": "left", "navbar_center": ["version-switcher", "navbar-nav"], - # "navbar_start": ["navbar-logo"], - # "navbar_end": ["theme-switcher", "navbar-icon-links"], - # "navbar_persistent": ["search-button"], - # "primary_sidebar_end": ["custom-template", "sidebar-ethical-ads"], - # "article_footer_items": ["test", "test"], - # "content_footer_items": ["test", "test"], "footer_start": ["copyright"], "footer_center": ["sphinx-version"], "secondary_sidebar_items": { @@ -290,7 +261,6 @@ "json_url": "https://docs.hyriver.io/_static/switcher.json", "version_match": version, }, - # "back_to_top_button": False, "navigation_with_keys": False, } @@ -432,7 +402,7 @@ def update_versions(app: Sphinx)-> None: versions.append( { "package": n, - "path": f"{p}/index.html", + "path": f"autoapi/{p.replace('-', '_')}/index.html", "version": json.loads(r.read().decode('utf-8'))["info"]["version"], } ) diff --git a/docs/source/index.rst b/docs/source/index.rst index 717c233..abf4d81 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -227,7 +227,7 @@ If you use any of HyRiver packages in your research, we appreciate citations: :maxdepth: 1 :hidden: - autoapi/index + apis changelogs contributing authors