diff --git a/doc/api/index.rst b/doc/api/index.rst
index 14a8fd31708..f0275cc1e2f 100644
--- a/doc/api/index.rst
+++ b/doc/api/index.rst
@@ -142,6 +142,7 @@ and store them in the GMT cache folder.
     datasets.load_ocean_ridge_points
     datasets.load_sample_bathymetry
     datasets.load_usgs_quakes
+    datasets.load_fractures_compilation
 
 .. automodule:: pygmt.exceptions
diff --git a/pygmt/datasets/__init__.py b/pygmt/datasets/__init__.py
index a7cfdf17998..1d06e7f08fe 100644
--- a/pygmt/datasets/__init__.py
+++ b/pygmt/datasets/__init__.py
@@ -4,6 +4,7 @@
 from pygmt.datasets.earth_relief import load_earth_relief
 from pygmt.datasets.samples import (
+    load_fractures_compilation,
     load_japan_quakes,
     load_ocean_ridge_points,
     load_sample_bathymetry,
diff --git a/pygmt/datasets/samples.py b/pygmt/datasets/samples.py
index 729ee1962b5..809fe6b183c 100644
--- a/pygmt/datasets/samples.py
+++ b/pygmt/datasets/samples.py
@@ -101,3 +101,25 @@ def load_usgs_quakes():
     fname = which("@usgs_quakes_22.txt", download="c")
     data = pd.read_csv(fname)
     return data
+
+
+def load_fractures_compilation():
+    """
+    Load a table of fracture lengths and azimuths as hypothetically digitized
+    from geological maps as a pandas.DataFrame.
+
+    This is the ``@fractures_06.txt`` dataset used in the GMT tutorials.
+
+    The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
+    first time you invoke this function. Afterwards, it will load the data from
+    the cache. So you'll need an internet connection the first time around.
+
+    Returns
+    -------
+    data : pandas.DataFrame
+        The data table. Use ``print(data.describe())`` to see the available
+        columns.
+    """
+    fname = which("@fractures_06.txt", download="c")
+    data = pd.read_csv(fname, header=None, sep=r"\s+", names=["azimuth", "length"])
+    return data[["length", "azimuth"]]
diff --git a/pygmt/tests/test_datasets_samples.py b/pygmt/tests/test_datasets_samples.py
index 81148fc2a04..ff55c64d652 100644
--- a/pygmt/tests/test_datasets_samples.py
+++ b/pygmt/tests/test_datasets_samples.py
@@ -2,6 +2,7 @@
 Test basic functionality for loading sample datasets.
 """
 from pygmt.datasets import (
+    load_fractures_compilation,
     load_japan_quakes,
     load_ocean_ridge_points,
     load_sample_bathymetry,
@@ -58,3 +59,16 @@ def test_usgs_quakes():
     """
     data = load_usgs_quakes()
     assert data.shape == (1197, 22)
+
+
+def test_fractures_compilation():
+    """
+    Check that the @fractures_06.txt dataset loads without errors.
+    """
+    data = load_fractures_compilation()
+    assert data.shape == (361, 2)
+    summary = data.describe()
+    assert summary.loc["min", "length"] == 98.6561
+    assert summary.loc["max", "length"] == 984.652
+    assert summary.loc["min", "azimuth"] == 0.0
+    assert summary.loc["max", "azimuth"] == 360.0