Skip to content

Commit

Permalink
Add new function to load fractures sample data (#1101)
Browse files Browse the repository at this point in the history
Co-authored-by: Meghan Jones <[email protected]>
Co-authored-by: Wei Ji <[email protected]>
Co-authored-by: Dongdong Tian <[email protected]>
  • Loading branch information
4 people authored Mar 24, 2021
1 parent 65f5aee commit 9ecb7d8
Show file tree
Hide file tree
Showing 4 changed files with 38 additions and 0 deletions.
1 change: 1 addition & 0 deletions doc/api/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -142,6 +142,7 @@ and store them in the GMT cache folder.
datasets.load_ocean_ridge_points
datasets.load_sample_bathymetry
datasets.load_usgs_quakes
datasets.load_fractures_compilation

.. automodule:: pygmt.exceptions

Expand Down
1 change: 1 addition & 0 deletions pygmt/datasets/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@

from pygmt.datasets.earth_relief import load_earth_relief
from pygmt.datasets.samples import (
load_fractures_compilation,
load_japan_quakes,
load_ocean_ridge_points,
load_sample_bathymetry,
Expand Down
22 changes: 22 additions & 0 deletions pygmt/datasets/samples.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,3 +101,25 @@ def load_usgs_quakes():
fname = which("@usgs_quakes_22.txt", download="c")
data = pd.read_csv(fname)
return data


def load_fractures_compilation():
    """
    Load a table of fracture lengths and azimuths as hypothetically digitized
    from geological maps as a pandas.DataFrame.

    This is the ``@fractures_06.txt`` dataset used in the GMT tutorials.

    The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
    first time you invoke this function. Afterwards, it will load the data from
    the cache. So you'll need an internet connection the first time around.

    Returns
    -------
    data : pandas.DataFrame
        The data table. Use ``print(data.describe())`` to see the available
        columns.
    """
    fname = which("@fractures_06.txt", download="c")
    # The remote file is whitespace-delimited with no header row; columns are
    # azimuth then length in the file itself.
    data = pd.read_csv(fname, header=None, sep=r"\s+", names=["azimuth", "length"])
    # Reorder so the returned table presents length first, azimuth second.
    return data[["length", "azimuth"]]
14 changes: 14 additions & 0 deletions pygmt/tests/test_datasets_samples.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
Test basic functionality for loading sample datasets.
"""
from pygmt.datasets import (
load_fractures_compilation,
load_japan_quakes,
load_ocean_ridge_points,
load_sample_bathymetry,
Expand Down Expand Up @@ -58,3 +59,16 @@ def test_usgs_quakes():
"""
data = load_usgs_quakes()
assert data.shape == (1197, 22)


def test_fractures_compilation():
    """
    Check that the @fractures_06.txt dataset loads without errors.
    """
    data = load_fractures_compilation()
    assert data.shape == (361, 2)
    stats = data.describe()
    # Expected extrema of each column, keyed by (describe-row, column).
    expected = {
        ("min", "length"): 98.6561,
        ("max", "length"): 984.652,
        ("min", "azimuth"): 0.0,
        ("max", "azimuth"): 360.0,
    }
    for (row, column), value in expected.items():
        assert stats.loc[row, column] == value

0 comments on commit 9ecb7d8

Please sign in to comment.