diff --git a/README.md b/README.md
index 36ccfb5c36..16cb293f8c 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
-### Version 3.2.9 develop — build 216
+### Version 3.2.9 develop — build 218
[![Build Status](https://travis-ci.org/modflowpy/flopy.svg?branch=develop)](https://travis-ci.org/modflowpy/flopy)
[![PyPI Version](https://img.shields.io/pypi/v/flopy.png)](https://pypi.python.org/pypi/flopy)
[![Coverage Status](https://coveralls.io/repos/github/modflowpy/flopy/badge.svg?branch=develop)](https://coveralls.io/github/modflowpy/flopy?branch=develop)
diff --git a/autotest/t042_test.py b/autotest/t042_test.py
index df37a0d6ea..29287a5027 100644
--- a/autotest/t042_test.py
+++ b/autotest/t042_test.py
@@ -6,7 +6,8 @@
sys.path.append('/Users/aleaf/Documents/GitHub/flopy3')
import numpy as np
import flopy
-from flopy.utils.postprocessing import get_transmissivities, get_water_table, get_gradients, get_saturated_thickness
+from flopy.utils.postprocessing import get_transmissivities, get_water_table, \
+ get_gradients, get_saturated_thickness
mf = flopy.modflow
diff --git a/code.json b/code.json
index bb29b4f185..0e7ad6f528 100755
--- a/code.json
+++ b/code.json
@@ -29,7 +29,7 @@
"downloadURL": "https://code.usgs.gov/usgs/modflow/flopy/archive/master.zip",
"vcs": "git",
"laborHours": -1,
- "version": "3.2.9.216",
+ "version": "3.2.9.218",
"date": {
"metadataLastUpdated": "2018-10-17"
},
diff --git a/flopy/export/metadata.py b/flopy/export/metadata.py
index db4749e8b9..23925e557d 100644
--- a/flopy/export/metadata.py
+++ b/flopy/export/metadata.py
@@ -8,23 +8,31 @@
class acdd:
- """Translate ScienceBase global metadata attributes to CF and ACDD
+ """
+ Translate ScienceBase global metadata attributes to CF and ACDD
global attributes.
-
- see:
- https://www.sciencebase.gov/catalog/
- http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#description-of-file-contents
- http://wiki.esipfed.org/index.php/Attribute_Convention_for_Data_Discovery
-
+
Parameters
----------
+
sciencebase_id : str
Unique identifier for ScienceBase record (e.g. 582da7efe4b04d580bd37be8)
model : flopy model object
-
+ Model object
+
+ References
+ ----------
+
+ https://www.sciencebase.gov/catalog/
+ http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#description-of-file-contents
+ http://wiki.esipfed.org/index.php/Attribute_Convention_for_Data_Discovery
+
"""
def __init__(self, sciencebase_id, model):
+ """
+ Class constructor
+ """
self.id = sciencebase_id
self.model = model
@@ -120,23 +128,34 @@ def creator_url(self):
@property
def geospatial_bounds(self):
- """Describes the data's 2D or 3D geospatial extent in OGC's Well-Known Text (WKT) Geometry format"""
- return 'POLYGON (({0} {2}, {0} {3}, {1} {3}, {1} {2}, {0} {2}))'.format(
- self.geospatial_lon_min,
- self.geospatial_lon_max,
- self.geospatial_lat_min,
- self.geospatial_lat_max
- )
+ """
+ Describes the data's 2D or 3D geospatial extent in OGC's Well-Known
+ Text (WKT) Geometry format
+ """
+ fmt = '(({0} {2}, {0} {3}, {1} {3}, {1} {2}, {0} {2}))'
+ bounds = 'POLYGON ' + fmt.format(self.geospatial_lon_min,
+ self.geospatial_lon_max,
+ self.geospatial_lat_min,
+ self.geospatial_lat_max)
+ return bounds
@property
def geospatial_bounds_vertical_crs(self):
- """The vertical coordinate reference system (CRS)
- for the Z axis of the point coordinates in the geospatial_bounds attribute. """
+ """
+ The vertical coordinate reference system (CRS) for the Z axis of
+ the point coordinates in the geospatial_bounds attribute.
+ """
epsg = {'NGVD29': 'EPSG:5702', 'NAVD88': 'EPSG:5703'}
return epsg.get(self.vertical_datum)
@property
def references(self):
+ """
+
+ Returns
+ -------
+
+ """
r = [self.citation]
links = [d.get('uri') for d in self.sb['webLinks']
if 'link' in d.get('type').lower()]
@@ -144,6 +163,12 @@ def references(self):
@property
def time_coverage(self):
+ """
+
+ Returns
+ -------
+
+ """
l = self.sb['dates']
tc = {}
for t in ['start', 'end']:
@@ -162,7 +187,9 @@ def time_coverage(self):
@property
def vertical_datum(self):
- """try to parse the vertical datum from the xml info"""
+ """
+ Try to parse the vertical datum from the xml info
+ """
altdatum = self._get_xml_attribute('altdatum')
if altdatum is not None:
if '88' in altdatum:
@@ -174,7 +201,9 @@ def vertical_datum(self):
@property
def xmlroot(self):
- """ElementTree root element object for xml metadata"""
+ """
+ ElementTree root element object for xml metadata
+ """
try:
return self.get_sciencebase_xml_metadata()
except:
@@ -185,9 +214,10 @@ def xmlfile(self):
return self.sb['identifiers'][0].get('key')
def get_sciencebase_metadata(self, id):
- """Gets metadata json text for given ID from sciencebase.gov; loads
+ """
+ Gets metadata json text for given ID from sciencebase.gov; loads
into python dictionary. Fetches the reference text using the url:
- https://www.sciencebase.gov/catalog/item/?format=json
+ https://www.sciencebase.gov/catalog/item/?format=json
Parameters
----------
@@ -205,13 +235,14 @@ def get_sciencebase_metadata(self, id):
import json
from flopy.utils.flopy_io import get_url_text
- text = get_url_text(url,
- error_msg='Need an internet connection to get metadata from ScienceBase.')
+ msg = 'Need an internet connection to get metadata from ScienceBase.'
+ text = get_url_text(url, error_msg=msg)
if text is not None:
return json.loads(text)
def get_sciencebase_xml_metadata(self):
- """Gets xml from sciencebase.gov, using XML url obtained
+ """
+ Gets xml from sciencebase.gov, using XML url obtained
from json using get_sciencebase_metadata().
Parameters
@@ -229,6 +260,6 @@ def get_sciencebase_xml_metadata(self):
from flopy.utils.flopy_io import get_url_text
url = self.xmlfile
- text = get_url_text(url,
- error_msg='Need an internet connection to get metadata from ScienceBase.')
+ msg = 'Need an internet connection to get metadata from ScienceBase.'
+ text = get_url_text(url, error_msg=msg)
return ET.fromstring(text)
diff --git a/flopy/mbase.py b/flopy/mbase.py
index 84068f4723..68db254c8a 100644
--- a/flopy/mbase.py
+++ b/flopy/mbase.py
@@ -649,9 +649,12 @@ def remove_external(self, fname=None, unit=None):
def add_existing_package(self, filename, ptype=None,
copy_to_model_ws=True):
- """ add an existing package to a model instance.
+ """
+ Add an existing package to a model instance.
+
Parameters
----------
+
filename : str
the name of the file to add as a package
ptype : optional
@@ -659,6 +662,11 @@ def add_existing_package(self, filename, ptype=None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
+
+ Returns
+ -------
+ None
+
"""
if ptype is None:
ptype = filename.split('.')[-1]
diff --git a/flopy/mf6/mfbase.py b/flopy/mf6/mfbase.py
index 2832e2d4e5..b67ff73ad3 100644
--- a/flopy/mf6/mfbase.py
+++ b/flopy/mf6/mfbase.py
@@ -182,11 +182,13 @@ class MFFileMgmt(object):
Parameters
----------
+
path : string
path on disk to the simulation
Attributes
----------
+
sim_path : string
path to the simulation
model_relative_path : OrderedDict
@@ -194,10 +196,12 @@ class MFFileMgmt(object):
Methods
-------
+
get_model_path : (key : string) : string
returns the model working path for the model key
- set_sim_path
+ set_sim_path : string
sets the simulation working path
+
"""
def __init__(self, path):
self._sim_path = ''
diff --git a/flopy/mf6/utils/binaryfile_utils.py b/flopy/mf6/utils/binaryfile_utils.py
index 8eac3d852c..81e1d53c6a 100644
--- a/flopy/mf6/utils/binaryfile_utils.py
+++ b/flopy/mf6/utils/binaryfile_utils.py
@@ -217,8 +217,8 @@ def _get_vertices(mfdict, key):
try:
import pandas as pd
except Exception as e:
- print("this feature requires pandas")
- return None
+ msg = 'MFOutputRequester._get_vertices(): requires pandas'
+ raise ImportError(msg)
mname = key[0]
cellid = mfdict[(mname, 'DISV8', 'CELL2D', 'cell2d_num')]
diff --git a/flopy/modflow/mffhb.py b/flopy/modflow/mffhb.py
index df2c364aed..cdfa32185e 100644
--- a/flopy/modflow/mffhb.py
+++ b/flopy/modflow/mffhb.py
@@ -78,6 +78,7 @@ class ModflowFhb(Package):
[lay, row, col, iaux, flwrat1, flwra2, ..., flwrat(nbdtime)],
[lay, row, col, iaux, flwrat1, flwra2, ..., flwrat(nbdtime)]
]
+
Note there should be nflw rows in ds7.
cnstm7 : float
@@ -96,6 +97,7 @@ class ModflowFhb(Package):
[lay, row, col, iaux, sbhed1, sbhed2, ..., sbhed(nbdtime)],
[lay, row, col, iaux, sbhed1, sbhed2, ..., sbhed(nbdtime)]
]
+
Note there should be nhed rows in ds7.
extension : string
diff --git a/flopy/modflow/mfhob.py b/flopy/modflow/mfhob.py
index c9457ba87c..6c842ca195 100755
--- a/flopy/modflow/mfhob.py
+++ b/flopy/modflow/mfhob.py
@@ -446,7 +446,7 @@ class HeadObservation(object):
is the zero-based layer index of the cell in which the head observation
is located. If layer is less than zero, hydraulic heads from multiple
layers are combined to calculate a simulated value. The number of
- layers equals the absolute value of layer, or |layer|. Default is 0.
+ layers equals the absolute value of layer, or abs(layer). Default is 0.
row : int
zero-based row index for the observation. Default is 0.
column : int
@@ -464,7 +464,7 @@ class HeadObservation(object):
observations. itt = 1 specified for heads and itt = 2 specified
if initial value is head and subsequent changes in head. Only
specified if irefsp is < 0. Default is 1.
- mlay : dictionary of length (|irefsp|)
+ mlay : dictionary of length (abs(irefsp))
key represents zero-based layer numbers for multilayer observations an
value represents the fractional value for each layer of multilayer
observations. If mlay is None, a default mlay of {0: 1.} will be
diff --git a/flopy/modflow/mflak.py b/flopy/modflow/mflak.py
index 6ddd626237..45943b28e8 100644
--- a/flopy/modflow/mflak.py
+++ b/flopy/modflow/mflak.py
@@ -26,169 +26,195 @@ class ModflowLak(Package):
this package will be added.
nlakes : int
NLAKES Number of separate lakes.
- Sublakes of multiple-lake systems are considered separate lakes for input purposes.
- The variable NLAKES is used, with certain internal assumptions and approximations,
- to dimension arrays for the simulation.
+ Sublakes of multiple-lake systems are considered separate lakes for
+ input purposes. The variable NLAKES is used, with certain internal
+ assumptions and approximations, to dimension arrays for the simulation.
ipakcb : int
(ILKCB in MODFLOW documentation)
- Whether or not to write cell-by-cell flows (yes if ILKCB> 0, no otherwise).
- If ILKCB< 0 and "Save Budget" is specified in the Output Control or ICBCFL is not equal to 0,
- the cell-by-cell flows will be printed in the standard output file.
- ICBCFL is specified in the input to the Output Control Option of MODFLOW.
+ Whether or not to write cell-by-cell flows (yes if ILKCB> 0, no
+ otherwise). If ILKCB< 0 and "Save Budget" is specified in the Output
+ Control or ICBCFL is not equal to 0, the cell-by-cell flows will be
+ printed in the standard output file. ICBCFL is specified in the input
+ to the Output Control Option of MODFLOW.
theta : float
- Explicit (THETA = 0.0), semi-implicit (0.0 < THETA < 1.0), or implicit (THETA = 1.0)
- solution for lake stages.
- SURFDEPTH is read only if THETA is assigned a negative value
- (the negative value of THETA is then changed to a positive value internally by the code).
- * A new method of solving for lake stage uses only the time-weighting factor THETA
- (Merritt and Konikow, 2000, p. 52) for transient simulations. THETA is automatically set
- to a value of 1.0 for all steady-state stress periods. For transient stress periods,
- Explicit (THETA = 0.0), semi-implicit (0.0 < THETA < 1.0), or implicit (THETA = 1.0)
- solutions can be used to calculate lake stages. The option to specify negative values
- for THETA is supported to allow specification of additional variables (NSSITER, SSCNCR, SURFDEP)
- for simulations that only include transient stress periods. If THETA is specified as a negative value,
- then it is converted to a positive value for calculations of lake stage.
- * In MODFLOW-2000 and later, ISS is not part of the input. Instead NSSITR or SSCNCR
- should be included if one or more stress periods is a steady state stress period as
- defined in Ss/tr in the Discretization file.
- * SSCNCR and NSSITR can be read for a transient only simulation by placing a
- negative sign immeditately in front of THETA. A negative THETA sets a flag which assumes
- input values for NSSITR and SSCNCR will follow THETA in the format as described by Merritt
- and Konikow (p. 52). A negative THETA is automatically reset to a positive value after
- values of NSSITR and SSCNCR are read.
+ Explicit (THETA = 0.0), semi-implicit (0.0 < THETA < 1.0), or implicit
+ (THETA = 1.0) solution for lake stages. SURFDEPTH is read only if
+ THETA is assigned a negative value (the negative value of THETA is
+ then changed to a positive value internally by the code).
+ * A new method of solving for lake stage uses only the time-weighting
+ factor THETA (Merritt and Konikow, 2000, p. 52) for transient
+ simulations. THETA is automatically set to a value of 1.0 for all
+ steady-state stress periods. For transient stress periods, Explicit
+ (THETA = 0.0), semi-implicit (0.0 < THETA < 1.0), or implicit
+ (THETA = 1.0) solutions can be used to calculate lake stages. The
+ option to specify negative values for THETA is supported to allow
+ specification of additional variables (NSSITER, SSCNCR, SURFDEP)
+ for simulations that only include transient stress periods. If
+ THETA is specified as a negative value, then it is converted to a
+ positive value for calculations of lake stage.
+ * In MODFLOW-2000 and later, ISS is not part of the input. Instead
+ NSSITR or SSCNCR should be included if one or more stress periods
+ is a steady state stress period as defined in Ss/tr in the
+ Discretization file.
+ * SSCNCR and NSSITR can be read for a transient only simulation by
+ placing a negative sign immediately in front of THETA. A negative
+ THETA sets a flag which assumes input values for NSSITR and SSCNCR
+ will follow THETA in the format as described by Merritt and Konikow
+ (p. 52). A negative THETA is automatically reset to a positive
+ value after values of NSSITR and SSCNCR are read.
nssitr : int
- Maximum number of iterations for Newton's method of solution for equilibrium lake stages
- in each MODFLOW iteration for steady-state aquifer head solution. Only read if ISS
- (option flag input to DIS Package of MODFLOW indicating steady-state solution)
- is not zero or if THETA is specified as a negative value.
+ Maximum number of iterations for Newton's method of solution for
+ equilibrium lake stages in each MODFLOW iteration for steady-state
+ aquifer head solution. Only read if ISS (option flag input to DIS
+ Package of MODFLOW indicating steady-state solution) is not zero or
+ if THETA is specified as a negative value.
* NSSITR and SSCNCR may be omitted for transient solutions (ISS = 0).
* In MODFLOW-2000 and later, ISS is not part of the input.
- Instead NSSITR or SSCNCR should be included if one or more stress periods is a
- steady state stress period as defined in Ss/tr in the Discretization file.
- * SSCNCR and NSSITR can be read for a transient only simulation by placing a negative sign
- immeditately in front of THETA. A negative THETA sets a flag which assumes input values
- for NSSITR and SSCNCR will follow THETA in the format as described by Merritt and Konikow
- (p. 52). A negative THETA is automatically reset to a positive value after values
- of NSSITR and SSCNCR are read.
+ Instead NSSITR or SSCNCR should be included if one or more stress
+ periods is a steady state stress period as defined in Ss/tr in the
+ Discretization file.
+ * SSCNCR and NSSITR can be read for a transient only simulation by
+ placing a negative sign immediately in front of THETA. A negative
+ THETA sets a flag which assumes input values for NSSITR and SSCNCR
+ will follow THETA in the format as described by Merritt and Konikow
+ (p. 52). A negative THETA is automatically reset to a positive
+ value after values of NSSITR and SSCNCR are read.
* If NSSITR = 0, a value of 100 will be used instead.
sscncr : float
- Convergence criterion for equilibrium lake stage solution by Newton's method.
- Only read if ISS is not zero or if THETA is specified as a negative value.
- see notes above for nssitr.
+ Convergence criterion for equilibrium lake stage solution by Newton's
+ method. Only read if ISS is not zero or if THETA is specified as a
+ negative value. See notes above for nssitr.
surfdepth : float
- The height of small topological variations (undulations) in lake-bottom elevations
- that can affect groundwater discharge to lakes.
- SURFDEPTH decreases the lakebed conductance for vertical flow across a horizontal lakebed
- caused both by a groundwater head that is between the lakebed and the
- lakebed plus SURFDEPTH and a lake stage that is also between the lakebed and the
- lakebed plus SURFDEPTH. This method provides a smooth transition from a condition
- of no groundwater discharge to a lake, when groundwater head is below the lakebed,
- to a condition of increasing groundwater discharge to a lake as groundwater head
- becomes greater than the elevation of the dry lakebed. The method also allows
- for the transition of seepage from a lake to groundwater when the lake stage decreases
- to the lakebed elevation. Values of SURFDEPTH ranging from 0.01 to 0.5 have been used
- successfully in test simulations. SURFDEP is read only if THETA is specified
- as a negative value.
+ The height of small topological variations (undulations) in lake-bottom
+ elevations that can affect groundwater discharge to lakes. SURFDEPTH
+ decreases the lakebed conductance for vertical flow across a horizontal
+ lakebed caused both by a groundwater head that is between the lakebed
+ and the lakebed plus SURFDEPTH and a lake stage that is also between
+ the lakebed and the lakebed plus SURFDEPTH. This method provides a
+ smooth transition from a condition of no groundwater discharge to a
+ lake, when groundwater head is below the lakebed, to a condition of
+ increasing groundwater discharge to a lake as groundwater head becomes
+ greater than the elevation of the dry lakebed. The method also allows
+ for the transition of seepage from a lake to groundwater when the lake
+ stage decreases to the lakebed elevation. Values of SURFDEPTH ranging
+ from 0.01 to 0.5 have been used successfully in test simulations.
+ SURFDEP is read only if THETA is specified as a negative value.
stages : float or list of floats
The initial stage of each lake at the beginning of the run.
stage_range : list of tuples (ssmn, ssmx) of length nlakes
- Where ssmn and ssmx are the minimum and maximum stages allowed for each lake
- in steady-state solution.
+ Where ssmn and ssmx are the minimum and maximum stages allowed for each
+ lake in steady-state solution.
* SSMN and SSMX are not needed for a transient run and must be omitted
when the solution is transient.
* When the first stress period is a steady-state stress period,
SSMN is defined in record 3.
- For subsequent steady-state stress periods, SSMN is defined in record 9a.
+
+ For subsequent steady-state stress periods, SSMN is defined in
+ record 9a.
lakarr : array of integers (nlay, nrow, ncol)
LKARR A value is read in for every grid cell.
If LKARR(I,J,K) = 0, the grid cell is not a lake volume cell.
- If LKARR(I,J,K) > 0, its value is the identification number of the
- lake occupying the grid cell. LKARR(I,J,K) must not exceed the value NLAKES.
+ If LKARR(I,J,K) > 0, its value is the identification number of the lake
+ occupying the grid cell. LKARR(I,J,K) must not exceed the value NLAKES.
If it does, or if LKARR(I,J,K) < 0, LKARR(I,J,K) is set to zero.
Lake cells cannot be overlain by non-lake cells in a higher layer.
Lake cells must be inactive cells (IBOUND = 0) and should not be
convertible to active cells (WETDRY = 0).
+
The Lake package can be used when all or some of the model layers
containing the lake are confined. The authors recommend using the
Layer-Property Flow Package (LPF) for this case, although the
- BCF and HUF Packages will work too. However, when using the BCF6 package
- to define aquifer properties, lake/aquifer conductances in the lateral direction
- are based solely on the lakebed leakance (and not on the lateral transmissivity
- of the aquifer layer). As before, when the BCF6 package is used,
- vertical lake/aquifer conductances are based on lakebed conductance and
- on the vertical hydraulic conductivity of the aquifer layer underlying
- the lake when the wet/dry option is implemented, and only on the lakebed leakance when
- the wet/dry option is not implemented.
+ BCF and HUF Packages will work too. However, when using the BCF6
+ package to define aquifer properties, lake/aquifer conductances in the
+ lateral direction are based solely on the lakebed leakance (and not on
+ the lateral transmissivity of the aquifer layer). As before, when the
+ BCF6 package is used, vertical lake/aquifer conductances are based on
+ lakebed conductance and on the vertical hydraulic conductivity of the
+ aquifer layer underlying the lake when the wet/dry option is
+ implemented, and only on the lakebed leakance when the wet/dry option
+ is not implemented.
bdlknc : array of floats (nlay, nrow, ncol)
- BDLKNC A value is read in for every grid cell. The value is the lakebed leakance
- that will be assigned to lake/aquifer interfaces that occur in the
- corresponding grid cell.
- If the wet-dry option flag (IWDFLG) is not active (cells cannot rewet if they become dry),
- then the BDLKNC values are assumed to represent the combined leakances of the
- lakebed material and the aquifer material between the lake and the centers of the
- underlying grid cells, i. e., the vertical conductance values (CV) will not be used
- in the computation of conductances across lake/aquifer boundary faces in the
- vertical direction.
- IBOUND and WETDRY should be set to zero for every cell for which LKARR is not equal to zero.
- IBOUND is defined in the input to the Basic Package of MODFLOW).
- WETDRY is defined in the input to the BCF or other flow package of MODFLOW
- if the IWDFLG option is active.
- When used with the HUF package, the Lake Package has been modified to compute effective
- lake-aquifer conductance solely on the basis of the user-specified value of lakebed leakance;
- aquifer hydraulic conductivities are not used in this calculation.
- An appropriate informational message is now printed after the lakebed conductances
+ BDLKNC A value is read in for every grid cell. The value is the lakebed
+ leakance that will be assigned to lake/aquifer interfaces that occur
+ in the corresponding grid cell. If the wet-dry option flag (IWDFLG) is
+ not active (cells cannot rewet if they become dry), then the BDLKNC
+ values are assumed to represent the combined leakances of the lakebed
+ material and the aquifer material between the lake and the centers of
+ the underlying grid cells, i. e., the vertical conductance values (CV)
+ will not be used in the computation of conductances across lake/aquifer
+ boundary faces in the vertical direction.
+
+ IBOUND and WETDRY should be set to zero for every cell for which LKARR
+ is not equal to zero. IBOUND is defined in the input to the Basic
+ Package of MODFLOW. WETDRY is defined in the input to the BCF or other
+ flow package of MODFLOW if the IWDFLG option is active. When used with
+ the HUF package, the Lake Package has been modified to compute
+ effective lake-aquifer conductance solely on the basis of the
+ user-specified value of lakebed leakance; aquifer hydraulic
+ conductivities are not used in this calculation. An appropriate
+ informational message is now printed after the lakebed conductances
are written to the main output file.
sill_data : dict
(dataset 8 in documentation)
- Dict of lists keyed by stress period. Each list has a tuple of dataset 8a, 8b
- for every multi-lake system, where dataset 8a is another tuple of
- IC : int
- The number of sublakes
- ISUB : list of ints
- The identification numbers of the sublakes in the sublake system
- being described in this record. The center lake number is listed first.
+ Dict of lists keyed by stress period. Each list has a tuple of dataset
+ 8a, 8b for every multi-lake system, where dataset 8a is another tuple of
+ IC : int
+ The number of sublakes
+ ISUB : list of ints
+ The identification numbers of the sublakes in the sublake
+ system being described in this record. The center lake number
+ is listed first.
And dataset 8b contains
- SILLVT : sequnce of floats
- A sequence of sill elevations for each sublakes that determines whether
- the center lake is connected with a given sublake. Values are entered
- for each sublake in the order the sublakes are listed in the previous record.
+ SILLVT : sequence of floats
+ A sequence of sill elevations for each sublakes that determines
+ whether the center lake is connected with a given sublake.
+ Values are entered for each sublake in the order the sublakes
+ are listed in the previous record.
flux_data : dict
(dataset 9 in documentation)
- Dict of lists keyed by stress period. The list for each stress period is a list
- of lists, with each list containing the variables PRCPLK EVAPLK RNF WTHDRW [SSMN] [SSMX]
- from the documentation.
- PRCPLK : float
- The rate of precipitation per unit area at the surface of a lake (L/T).
- EVAPLK : float
- The rate of evaporation per unit area from the surface of a lake (L/T).
- RNF : float
- Overland runoff from an adjacent watershed entering the lake.
- If RNF > 0, it is specified directly as a volumetric rate, or flux (L3 /T).
- If RNF < 0, its absolute value is used as a dimensionless multiplier applied
- to the product of the lake precipitation rate per unit area (PRCPLK) and the
- surface area of the lake at its full stage (occupying all layer 1 lake cells).
- When RNF is entered as a dimensionless multiplier (RNF < 0),
- it is considered to be the product of two proportionality factors.
- The first is the ratio of the area of the basin contributing runoff to the
- surface area of the lake when it is at full stage. The second is the fraction
- of the current rainfall rate that becomes runoff to the lake. This procedure
- provides a means for the automated computation of runoff rate from a watershed
- to a lake as a function of varying rainfall rate. For example, if the basin area
- is 10 times greater than the surface area of the lake, and 20 percent of the
- precipitation on the basin becomes overland runoff directly into the lake,
- then set RNF = -2.0.
- WTHDRW : float
- The volumetric rate, or flux (L3 /T), of water removal from a lake by means
- other than rainfall, evaporation, surface outflow, or groundwater seepage.
- A negative value indicates augmentation. Normally, this would be used to specify
- the rate of artificial withdrawal from a lake for human water use, or if negative,
- artificial augmentation of a lake volume for esthetic or recreational purposes.
- SSMN : float
- Minimum stage allowed for each lake in steady-state solution.
- See notes on ssmn and ssmx above.
- SSMX : float
- SSMX Maximum stage allowed for each lake in steady-state solution.
-
+ Dict of lists keyed by stress period. The list for each stress period
+ is a list of lists, with each list containing the variables
+ PRCPLK EVAPLK RNF WTHDRW [SSMN] [SSMX] from the documentation.
+ PRCPLK : float
+ The rate of precipitation per unit area at the surface of a
+ lake (L/T).
+ EVAPLK : float
+ The rate of evaporation per unit area from the surface of a
+ lake (L/T).
+ RNF : float
+ Overland runoff from an adjacent watershed entering the lake.
+ If RNF > 0, it is specified directly as a volumetric rate, or
+ flux (L3 /T). If RNF < 0, its absolute value is used as a
+ dimensionless multiplier applied to the product of the lake
+ precipitation rate per unit area (PRCPLK) and the surface area
+ of the lake at its full stage (occupying all layer 1 lake
+ cells). When RNF is entered as a dimensionless multiplier
+ (RNF < 0), it is considered to be the product of two
+ proportionality factors. The first is the ratio of the area of
+ the basin contributing runoff to the surface area of the lake
+ when it is at full stage. The second is the fraction of the
+ current rainfall rate that becomes runoff to the lake. This
+ procedure provides a means for the automated computation of
+ runoff rate from a watershed to a lake as a function of
+ varying rainfall rate. For example, if the basin area is 10
+ times greater than the surface area of the lake, and 20 percent
+ of the precipitation on the basin becomes overland runoff
+ directly into the lake, then set RNF = -2.0.
+ WTHDRW : float
+ The volumetric rate, or flux (L3 /T), of water removal from a
+ lake by means other than rainfall, evaporation, surface
+ outflow, or groundwater seepage. A negative value indicates
+ augmentation. Normally, this would be used to specify the
+ rate of artificial withdrawal from a lake for human water use,
+ or if negative, artificial augmentation of a lake volume for
+ esthetic or recreational purposes.
+ SSMN : float
+ Minimum stage allowed for each lake in steady-state solution.
+ See notes on ssmn and ssmx above.
+ SSMX : float
+ SSMX Maximum stage allowed for each lake in steady-state
+ solution.
+
options : list of strings
Package options. (default is None).
extension : string
@@ -324,7 +350,7 @@ def __init__(self, model, nlakes=1, ipakcb=None, theta=-1.,
if isinstance(stages, float):
if self.nlakes == 1:
stages = np.array([self.nlakes], dtype=np.float) * stages
- else:
+ else:
stages = np.ones(self.nlakes, dtype=float) * stages
elif isinstance(stages, list):
stages = np.array(stages)
@@ -477,7 +503,8 @@ def write_file(self):
f.write(write_fixed_var(t, ipos=ipos,
free=self.parent.free_format_input))
- ds8_keys = list(self.sill_data.keys()) if self.sill_data is not None else []
+ ds8_keys = list(
+ self.sill_data.keys()) if self.sill_data is not None else []
ds9_keys = list(self.flux_data.keys())
nper = self.parent.dis.steady.shape[0]
for kper in range(nper):
diff --git a/flopy/modflow/mfmnw2.py b/flopy/modflow/mfmnw2.py
index ff74cca74c..e50a74c5b3 100644
--- a/flopy/modflow/mfmnw2.py
+++ b/flopy/modflow/mfmnw2.py
@@ -14,7 +14,8 @@
class Mnw(object):
- """Multi-Node Well object class
+ """
+ Multi-Node Well object class
Parameters
----------
@@ -273,6 +274,11 @@ class Mnw(object):
specified previously on this line. Sign (positive or negative) is ignored.
mnwpackage : ModflowMnw2 instance
package that mnw is attached to
+
+ Returns
+ -------
+ None
+
"""
by_node_variables = ['k', 'i', 'j', 'ztop', 'zbotm', 'rw', 'rskin',
'kskin', 'B', 'C', 'P', 'cwc', 'pp']
@@ -287,8 +293,11 @@ def __init__(self, wellid,
pumplay=0, pumprow=0, pumpcol=0, zpump=None,
hlim=None, qcut=None, qfrcmn=None, qfrcmx=None,
hlift=None, liftq0=None, liftqmax=None, hwtol=None,
- liftn=None, qn=None, mnwpackage=None,
- ):
+ liftn=None, qn=None, mnwpackage=None):
+ """
+ Class constructor
+ """
+
self.nper = nper
self.mnwpackage = mnwpackage # associated ModflowMnw2 instance
self.aux = None if mnwpackage is None else mnwpackage.aux
@@ -368,7 +377,14 @@ def __init__(self, wellid,
self.stress_period_data[n] = [self.__dict__[n][0]]
def make_node_data(self):
- """Makes the node data array from variables entered individually."""
+ """
+ Make the node data array from variables entered individually.
+
+ Returns
+ -------
+ None
+
+ """
nnodes = self.nnodes
node_data = ModflowMnw2.get_empty_node_data(np.abs(nnodes),
aux_names=self.aux)
@@ -381,14 +397,46 @@ def make_node_data(self):
@staticmethod
def get_empty_stress_period_data(nper=0, aux_names=None, structured=True,
default_value=0):
- # get an empty recarray that correponds to dtype
+ """
+ Get an empty stress_period_data recarray that corresponds to dtype
+
+ Parameters
+ ----------
+ nper : int
+
+ aux_names
+ structured
+ default_value
+
+ Returns
+ -------
+ ra : np.recarray
+ Recarray
+
+ """
+ #
dtype = Mnw.get_default_spd_dtype(structured=structured)
if aux_names is not None:
dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
- return create_empty_recarray(nper, dtype)
+ return create_empty_recarray(nper, dtype, default_value=default_value)
@staticmethod
def get_default_spd_dtype(structured=True):
+ """
+ Get the default stress period data dtype
+
+ Parameters
+ ----------
+ structured : bool
+ Boolean that defines if a structured (True) or unstructured (False)
+ dtype will be created (default is True). Not implemented for
+ unstructured.
+
+ Returns
+ -------
+ dtype : np.dtype
+
+ """
if structured:
return np.dtype([('k', np.int),
('i', np.int),
@@ -402,54 +450,27 @@ def get_default_spd_dtype(structured=True):
('qfrcmn', np.float32),
('qfrcmx', np.float32)])
else:
- pass
+ msg = 'Mnw2: get_default_spd_dtype not implemented for ' + \
+ 'unstructured grids'
+ raise NotImplementedError(msg)
@staticmethod
def get_item2_names(mnw2obj=None, node_data=None):
- """Determine which variables are being used.
+ """
+ Get the names of the Mnw variables that are being used.
+
+ Parameters
+ ----------
+ mnw2obj : Mnw object
+ Mnw object (default is None)
+ node_data : unknown
+ Unknown what is in this parameter (default is None)
Returns
-------
- names : list of str
- List of names (same as variables in MNW2 Package input instructions) of columns
- to assign (upon load) or retain (upon write) in reach_data array.
+ names : list
+ List of dtype names.
- Note
- ----
- Lowercase is used for all variable names.
-
- ztop : float
- top elevation of open intervals of vertical well.
- zbotm : float
- bottom elevation of open intervals of vertical well.
- wellid : str
- losstyp : str
- pumploc : int
- qlimit : int
- ppflag : int
- pumpcap : int
- rw : float
- rskin : float
- kskin : float
- B : float
- C : float
- P : float
- cwc : float
- pp : float
- pumplay : int
- pumprow : int
- pumpcol : int
- zpump : float
- hlim : float
- qcut : int
- qfrcmn : float
- qfrcmx : float
- hlift : float
- liftq0 : float
- liftqmax : float
- hwtol : float
- liftn : float
- qn : float
"""
if node_data is not None:
@@ -460,7 +481,8 @@ def get_item2_names(mnw2obj=None, node_data=None):
qlimit = node_data.qlimit[0]
pumpcap = node_data.pumpcap[0]
qcut = node_data.qcut[0]
- else: # get names based on mnw2obj attribute values
+ # get names based on mnw2obj attribute values
+ else:
nnodes = mnw2obj.nnodes
losstype = mnw2obj.losstype
ppflag = mnw2obj.ppflag
@@ -502,6 +524,20 @@ def get_item2_names(mnw2obj=None, node_data=None):
@staticmethod
def get_nnodes(node_data):
+ """
+ Get the number of MNW2 nodes.
+
+ Parameters
+ ----------
+ node_data : numpy.recarray
+ Recarray of MNW2 node data
+
+ Returns
+ -------
+ nnodes : int
+ Number of MNW2 nodes
+
+ """
nnodes = len(node_data)
# check if ztop and zbotm were entered,
# flip nnodes for format 2
@@ -542,7 +578,9 @@ def check(self, f=None, verbose=True, level=1):
return chk
def _set_attributes_from_node_data(self):
- """Populates the Mnw object attributes with values from node_data table."""
+ """
+ Populates the Mnw object attributes with values from node_data table.
+ """
names = Mnw.get_item2_names(node_data=self.node_data)
for n in names:
# assign by node variables as lists if they are being included
@@ -552,11 +590,22 @@ def _set_attributes_from_node_data(self):
self.__dict__[n] = self.node_data[n][0]
def _write_2(self, f_mnw, float_format=' {:15.7E}', indent=12):
- """write out dataset 2 for MNW.
+ """
+ Write out dataset 2 for MNW.
Parameters
----------
f_mnw : package file handle
+ file handle for MNW2 input file
+ float_format : str
+ python format statement for floats (default is ' {:15.7E}').
+ indent : int
+ number of spaces to indent line (default is 12).
+
+ Returns
+ -------
+ None
+
"""
# update object attributes with values from node_data
self._set_attributes_from_node_data()
@@ -808,7 +857,7 @@ def __init__(self, model, mnwmax=0, nodtot=None, ipakcb=0, mnwprnt=0,
self.nper = 1 if self.nper == 0 else self.nper # otherwise iterations from 0, nper won't run
self.structured = self.parent.structured
- # Dataset 0 -----------------------------------------------------------------------
+ # Dataset 0
self.heading = '# {} package for '.format(self.name[0]) + \
' {}, '.format(model.version_types[model.version]) + \
'generated by Flopy.'
@@ -884,20 +933,49 @@ def _add_kij_to_stress_period_data(self):
@staticmethod
def get_empty_node_data(maxnodes=0, aux_names=None, structured=True,
default_value=0):
- """get an empty recarray that correponds to dtype
+ """
+ get an empty recarray that corresponds to dtype
Parameters
----------
maxnodes : int
- Total number of nodes to be simulated
+ Total number of nodes to be simulated (default is 0)
+ aux_names : list
+ List of aux name strings (default is None)
+ structured : bool
+ Boolean indicating if a structured (True) or unstructured (False)
+ model (default is True).
+ default_value : float
+ Default value for float variables (default is 0).
+
+ Returns
+ -------
+ r : np.recarray
+ Recarray of default dtype of shape maxnode
"""
dtype = ModflowMnw2.get_default_node_dtype(structured=structured)
if aux_names is not None:
dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
- return create_empty_recarray(maxnodes, dtype)
+ return create_empty_recarray(maxnodes, dtype,
+ default_value=default_value)
@staticmethod
def get_default_node_dtype(structured=True):
+ """
+ Get default dtype for node data
+
+ Parameters
+ ----------
+ structured : bool
+ Boolean indicating if a structured (True) or unstructured (False)
+ model (default is True).
+
+ Returns
+ -------
+ dtype : np.dtype
+ node data dtype
+
+ """
if structured:
return np.dtype([('k', np.int),
('i', np.int),
@@ -933,19 +1011,55 @@ def get_default_node_dtype(structured=True):
('liftn', np.float32),
('qn', np.float32)])
else:
- pass
+ msg = 'get_default_node_dtype: unstructured model not supported'
+ raise NotImplementedError(msg)
@staticmethod
def get_empty_stress_period_data(itmp=0, aux_names=None, structured=True,
default_value=0):
- # get an empty recarray that correponds to dtype
+ """
+ Get an empty stress period data recarray
+
+ Parameters
+ ----------
+ itmp : int
+ Number of entries in this stress period (default is 0).
+ aux_names : list
+ List of aux names (default is None).
+ structured : bool
+ Boolean indicating if a structured (True) or unstructured (False)
+ model (default is True).
+ default_value : float
+ Default value for float variables (default is 0).
+
+ Returns
+ -------
+ r : np.recarray
+ Recarray of default dtype of shape itmp
+
+ """
dtype = ModflowMnw2.get_default_spd_dtype(structured=structured)
if aux_names is not None:
dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
- return create_empty_recarray(itmp, dtype)
+ return create_empty_recarray(itmp, dtype, default_value=default_value)
@staticmethod
def get_default_spd_dtype(structured=True):
+ """
+ Get default dtype for stress period data
+
+ Parameters
+ ----------
+ structured : bool
+ Boolean indicating if a structured (True) or unstructured (False)
+ model (default is True).
+
+ Returns
+ -------
+ dtype : np.dtype
+ stress period data dtype
+
+ """
if structured:
return np.dtype([('k', np.int),
('i', np.int),
@@ -959,10 +1073,36 @@ def get_default_spd_dtype(structured=True):
('qfrcmn', np.float32),
('qfrcmx', np.float32)])
else:
- pass
+ msg = 'get_default_spd_dtype: unstructured model not supported'
+ raise NotImplementedError(msg)
@staticmethod
def load(f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None):
+ """
+
+ Parameters
+ ----------
+ f : filename or file handle
+ File to load.
+ model : model object
+ The model object (of type :class:`flopy.modflow.ModflowMnw2`) to
+ which this package will be added.
+ nper : int
+ Number of periods
+ gwt : bool
+ nsol : int
+ ext_unit_dict : dictionary, optional
+ If the arrays in the file are specified using EXTERNAL,
+ or older style array control records, then `f` should be a file
+ handle. In this case ext_unit_dict is required, which can be
+ constructed using the function
+ :class:`flopy.utils.mfreadnam.parsenamefile`.
+
+
+ Returns
+ -------
+
+ """
if model.verbose:
sys.stdout.write('loading mnw2 package file...\n')
@@ -1074,7 +1214,7 @@ def check(self, f=None, verbose=True, level=1):
Returns
-------
- None
+ chk : check object
Examples
--------
@@ -1099,15 +1239,20 @@ def check(self, f=None, verbose=True, level=1):
return chk
def get_allnode_data(self):
- """Get a version of the node_data array that has all MNW2 nodes listed explicitly.
- For example, MNWs with open intervals encompassing multiple layers would have a row
- entry for each layer. Ztop and zbotm values indicate the top and bottom elevations of the node
- (these are the same as the layer top and bottom if the node fully penetrates that layer).
+ """
+ Get a version of the node_data array that has all MNW2 nodes listed
+ explicitly. For example, MNWs with open intervals encompassing
+ multiple layers would have a row entry for each layer. Ztop and zbotm
+ values indicate the top and bottom elevations of the node (these are
+ the same as the layer top and bottom if the node fully penetrates
+ that layer).
Returns
-------
allnode_data : np.recarray
- Numpy record array of same form as node_data, except each row represents only one node.
+ Numpy record array of same form as node_data, except each row
+ represents only one node.
+
"""
from numpy.lib.recfunctions import stack_arrays
@@ -1126,17 +1271,25 @@ def get_allnode_data(self):
rk = r.copy()
rk['k'] = k
if k > startK:
- rk['ztop'] = self.parent.dis.botm[
- k - 1, rk['i'], rk['j']]
+ loc = (k - 1, rk['i'], rk['j'])
+ rk['ztop'] = self.parent.dis.botm[loc]
if k < endK:
- rk['zbotm'] = self.parent.dis.botm[
- k, rk['i'], rk['j']]
+ loc = (k, rk['i'], rk['j'])
+ rk['zbotm'] = self.parent.dis.botm[loc]
nd.append(rk)
else:
nd.append(r)
return stack_arrays(nd, usemask=False).view(np.recarray)
def make_mnw_objects(self):
+ """
+ Make Mnw objects and store them in the mnw dictionary
+
+ Returns
+ -------
+ None
+
+ """
node_data = self.node_data
stress_period_data = self.stress_period_data
self.mnw = {}
@@ -1172,7 +1325,18 @@ def make_mnw_objects(self):
mnwpackage=self)
def make_node_data(self, mnwobjs):
- """Make node_data rec array from Mnw objects"""
+ """
+ Make node_data rec array from Mnw objects
+
+ Parameters
+ ----------
+ mnwobjs : Mnw object, or list or dict of Mnw objects
+
+ Returns
+ -------
+ None
+
+ """
if isinstance(mnwobjs, dict):
mnwobjs = list(mnwobjs.values())
elif isinstance(mnwobjs, Mnw):
@@ -1184,7 +1348,18 @@ def make_node_data(self, mnwobjs):
self.node_data = node_data
def make_stress_period_data(self, mnwobjs):
- """make stress_period_data rec array from Mnw objects"""
+ """
+ Make stress_period_data rec array from Mnw objects
+
+ Parameters
+ ----------
+ mnwobjs : Mnw object, or list or dict of Mnw objects
+
+ Returns
+ -------
+ None
+
+ """
if isinstance(mnwobjs, dict):
mnwobjs = list(mnwobjs.values())
elif isinstance(mnwobjs, Mnw):
@@ -1219,6 +1394,20 @@ def make_stress_period_data(self, mnwobjs):
dtype=stress_period_data[0].dtype)
def export(self, f, **kwargs):
+ """
+ Export MNW2 data
+
+ Parameters
+ ----------
+ f : file
+ kwargs
+
+ Returns
+ -------
+ e : export object
+
+
+ """
# A better strategy would be to build a single 4-D MfList
# (currently the stress period data array has everything in layer 0)
self.node_data_MfList = MfList(self, self.get_allnode_data(),
@@ -1254,6 +1443,19 @@ def export(self, f, **kwargs):
return super(ModflowMnw2, self).export(f, **kwargs)
def _write_1(self, f_mnw):
+ """
+
+ Parameters
+ ----------
+ f_mnw : file object
+ File object for MNW2 input file
+
+
+ Returns
+ -------
+ None
+
+ """
f_mnw.write('{:.0f} '.format(self.mnwmax))
if self.mnwmax < 0:
f_mnw.write('{:.0f} '.format(self.nodtot))
@@ -1268,6 +1470,12 @@ def write_file(self, filename=None, float_format=' {:15.7E}',
"""
Write the package file.
+ Parameters
+ ----------
+ filename : str
+ float_format
+ use_tables
+
Returns
-------
None
@@ -1347,6 +1555,16 @@ def defaultunit():
def _parse_1(line):
+ """
+
+ Parameters
+ ----------
+ line
+
+ Returns
+ -------
+
+ """
line = line_parse(line)
mnwmax = pop_item(line, int)
nodtot = None
@@ -1362,6 +1580,16 @@ def _parse_1(line):
def _parse_2(f):
+ """
+
+ Parameters
+ ----------
+ f
+
+ Returns
+ -------
+
+ """
# dataset 2a
line = line_parse(next(f))
if len(line) > 2:
@@ -1485,6 +1713,24 @@ def _parse_2(f):
def _parse_2c(line, losstype, rw=-1, rskin=-1, kskin=-1, B=-1, C=-1, P=-1,
cwc=-1):
+ """
+
+ Parameters
+ ----------
+ line
+ losstype
+ rw
+ rskin
+ kskin
+ B
+ C
+ P
+ cwc
+
+ Returns
+ -------
+
+ """
if not isinstance(line, list):
line = line_parse(line)
nd = {} # dict of dataset 2c/2d items
@@ -1510,6 +1756,18 @@ def _parse_2c(line, losstype, rw=-1, rskin=-1, kskin=-1, B=-1, C=-1, P=-1,
def _parse_4a(line, mnw, gwt=False):
+ """
+
+ Parameters
+ ----------
+ line
+ mnw
+ gwt
+
+ Returns
+ -------
+
+ """
capmult = 0
cprime = 0
line = line_parse(line)
@@ -1525,6 +1783,16 @@ def _parse_4a(line, mnw, gwt=False):
def _parse_4b(line):
+ """
+
+ Parameters
+ ----------
+ line
+
+ Returns
+ -------
+
+ """
qfrcmn = 0
qfrcmx = 0
line = line_parse(line)
@@ -1542,7 +1810,8 @@ def __init__(self, itmp, nactivewells):
self.nactivewells = nactivewells
def __str__(self):
- return (
- '\n\nItmp value of {} is positive but does not equal the number of active wells specified ({}). '
- 'See Mnw2 package documentation for details.'.format(self.itmp,
- self.nactivewells))
+ s = '\n\nItmp value of {} '.format(self.itmp) + \
+ 'is positive but does not equal the number of active wells ' + \
+ 'specified ({}). '.format(self.nactivewells) + \
+ 'See MNW2 package documentation for details.'
+ return s
diff --git a/flopy/modflow/mfsfr2.py b/flopy/modflow/mfsfr2.py
index 5d1203bbdb..ea5e4966ed 100644
--- a/flopy/modflow/mfsfr2.py
+++ b/flopy/modflow/mfsfr2.py
@@ -504,7 +504,8 @@ def df(self):
if pd:
return pd.DataFrame(self.reach_data)
else:
- return None
+ msg = 'ModflowSfr2.df: pandas not available'
+ raise ImportError(msg)
def _set_paths(self):
graph = self.graph
@@ -1255,7 +1256,8 @@ def renumber_channel_data(d):
self.channel_flow_data = renumber_channel_data(self.channel_flow_data)
def plot_path(self, start_seg=None, end_seg=0, plot_segment_lines=True):
- """Plot a profile of streambed elevation and model top
+ """
+ Plot a profile of streambed elevation and model top
along a path of segments.
Parameters
@@ -1274,8 +1276,8 @@ def plot_path(self, start_seg=None, end_seg=0, plot_segment_lines=True):
"""
import matplotlib.pyplot as plt
if not pd:
- print('This method requires pandas')
- return
+ msg = 'ModflowSfr2.plot_path: pandas not available'
+ raise ImportError(msg)
df = self.df
m = self.parent
diff --git a/flopy/mt3d/mtrct.py b/flopy/mt3d/mtrct.py
index cc62ca47c7..9537a89a5b 100644
--- a/flopy/mt3d/mtrct.py
+++ b/flopy/mt3d/mtrct.py
@@ -107,8 +107,8 @@ class Mt3dRct(Package):
File unit number. If file unit number is None then an unused unit
number if used. (default is None).
- **kwargs
- --------
+ Other Parameters
+ ----------------
srconcn : float or array of floats (nlay, nrow, ncol)
srconcn is the user-specified initial concentration for the sorbed
phase of species n. If srconcn is not passed as a **kwarg and
@@ -133,15 +133,6 @@ class Mt3dRct(Package):
n. If rc2n is not passed as a **kwarg and ireact > 0 then rc2 for
species n is set to 0. See description of rc2 for a more complete
description of rc2n.
- extension : string
- Filename extension (default is 'rct')
- unitnumber : int
- File unit number (default is None).
- filenames : str or list of str
- Filenames to use for the package. If filenames=None the package name
- will be created using the model name and package extension. If a
- single string is passed the package will be set to the string.
- Default is None.
Attributes
diff --git a/flopy/mt3d/mtsft.py b/flopy/mt3d/mtsft.py
index 3b2f5741fe..7a81e601d4 100644
--- a/flopy/mt3d/mtsft.py
+++ b/flopy/mt3d/mtsft.py
@@ -446,13 +446,10 @@ def load(f, model, nsfinit=None, nper=None, ncomp=None,
>>> import os
>>> import flopy
-
- >>> os.chdir(r'C:\temp\flopy_test\sfr_test')
- >>> mf = flopy.modflow.Modflow.load('CrnkNic_mf.nam', load_only=['dis', 'bas6'])
+ >>> mf = flopy.modflow.Modflow.load('CrnkNic_mf.nam',
+ ... load_only=['dis', 'bas6'])
>>> sfr = flopy.modflow.ModflowSfr2.load('CrnkNic.sfr2', mf)
- >>> chk = sfr.check()
-
- >>> mt = flopy.mt3d.Mt3dms.load('CrnkNic_mt.nam', exe_name = 'mt3d-usgs_1.0.00.exe', load_only='btn')
+ >>> mt = flopy.mt3d.Mt3dms.load('CrnkNic_mt.nam', load_only='btn')
>>> sft = flopy.mt3d.Mt3dSft.load('CrnkNic.sft', mt)
"""
diff --git a/flopy/utils/binaryfile.py b/flopy/utils/binaryfile.py
index 201e4b5c2e..202db976fe 100755
--- a/flopy/utils/binaryfile.py
+++ b/flopy/utils/binaryfile.py
@@ -1,8 +1,9 @@
"""
-Module to read MODFLOW binary output files. The module contains three
+Module to read MODFLOW binary output files. The module contains four
important classes that can be accessed by the user.
* HeadFile (Binary head file. Can also be used for drawdown)
+* HeadUFile (Binary MODFLOW-USG unstructured head file)
* UcnFile (Binary concentration file from MT3DMS)
* CellBudgetFile (Binary cell-by-cell flow file)
@@ -95,7 +96,7 @@ def create(bintype=None, precision='single', **kwargs):
return header.get_values()
-def binaryread_struct(file, vartype, shape=(1, ), charlen=16):
+def binaryread_struct(file, vartype, shape=(1,), charlen=16):
"""
Read text, a scalar value, or an array of values from a binary file.
file : file object
@@ -140,7 +141,7 @@ def binaryread_struct(file, vartype, shape=(1, ), charlen=16):
return result
-def binaryread(file, vartype, shape=(1, ), charlen=16):
+def binaryread(file, vartype, shape=(1,), charlen=16):
"""
Uses numpy to read from binary file. This was found to be faster than the
struct approach and is used as the default.
@@ -675,8 +676,8 @@ def _build_index(self):
print(itxt + ': ' + str(s))
print('file position: ', ipos)
if int(header['imeth']) != 5 and \
- int(header['imeth']) != 6 and \
- int(header['imeth']) != 7:
+ int(header['imeth']) != 6 and \
+ int(header['imeth']) != 7:
print('')
# store record and byte position mapping
@@ -741,8 +742,8 @@ def _skip_record(self, header):
print('nlist: ', nlist)
print('')
nbytes = nlist * (
- np.int32(1).nbytes * 2 + self.realtype(1).nbytes +
- naux * self.realtype(1).nbytes)
+ np.int32(1).nbytes * 2 + self.realtype(1).nbytes +
+ naux * self.realtype(1).nbytes)
else:
raise Exception('invalid method code ' + str(imeth))
if nbytes != 0:
@@ -833,7 +834,7 @@ def list_unique_records(self):
Print a list of unique record names
"""
print('RECORD IMETH')
- print(22*'-')
+ print(22 * '-')
for rec, imeth in zip(self.textlist, self.imethlist):
if isinstance(rec, bytes):
rec = rec.decode()
@@ -1098,7 +1099,7 @@ def get_ts(self, idx, text=None, times=None):
timesint = self.get_times()
if len(timesint) < 1:
if times is None:
- timesint = [x+1 for x in range(len(kk))]
+ timesint = [x + 1 for x in range(len(kk))]
else:
if isinstance(times, np.ndarray):
times = times.tolist()
@@ -1481,7 +1482,7 @@ def close(self):
class HeadUFile(BinaryLayerFile):
"""
- USG HeadUFile Class.
+ Unstructured MODFLOW-USG HeadUFile Class.
Parameters
----------
@@ -1536,6 +1537,9 @@ class HeadUFile(BinaryLayerFile):
def __init__(self, filename, text='headu', precision='auto',
verbose=False, **kwargs):
+ """
+ Class constructor
+ """
self.text = text.encode()
if precision == 'auto':
precision = get_headfile_precision(filename)
@@ -1578,7 +1582,7 @@ def _get_data_array(self, totim=0.):
print(msg)
self.file.seek(ipos, 0)
data[ilay - 1] = binaryread(self.file, self.realtype,
- shape=(npl, ))
+ shape=(npl,))
return data
def get_databytes(self, header):
@@ -1603,5 +1607,31 @@ def get_databytes(self, header):
return npl * np.int64(self.realtype(1).nbytes)
def get_ts(self, idx):
- raise NotImplementedError()
+ """
+ Get a time series from the binary HeadUFile (not implemented).
+
+ Parameters
+ ----------
+ idx : tuple of ints, or a list of a tuple of ints
+ idx can be (layer, row, column) or it can be a list in the form
+ [(layer, row, column), (layer, row, column), ...]. The layer,
+ row, and column values must be zero based.
+
+ Returns
+ ----------
+ out : numpy array
+ Array has size (ntimes, ncells + 1). The first column in the
+ data array will contain time (totim).
+ See Also
+ --------
+
+ Notes
+ -----
+
+ Examples
+ --------
+
+ """
+ msg = 'HeadUFile: get_ts() is not implemented'
+ raise NotImplementedError(msg)
diff --git a/flopy/utils/check.py b/flopy/utils/check.py
index 0148206023..975f15600e 100644
--- a/flopy/utils/check.py
+++ b/flopy/utils/check.py
@@ -36,8 +36,6 @@ class check:
sy : tuple
Reasonable minimum/maximum specific storage values;
Default is (3.3e-6, 2e-2) after Anderson, Woessner and Hunt (2015, Table 5.2).
- thin_cell_threshold : float
- Minimum cell thickness in model units. Thicknesses below this value will be flagged (default 1.0).
Notes
-----
diff --git a/flopy/utils/flopy_io.py b/flopy/utils/flopy_io.py
index 00e1e8f502..6356954c90 100755
--- a/flopy/utils/flopy_io.py
+++ b/flopy/utils/flopy_io.py
@@ -228,10 +228,12 @@ def flux_to_wel(cbc_file,text,precision="single",model=None,verbose=False):
wel = ModflowWel(model,stress_period_data=sp_data)
return wel
-def loadtxt(file, delimiter=' ', dtype=None, skiprows=0, use_pandas=True, **kwargs):
- """Use pandas if it is available to load a text file
- (significantly faster than n.loadtxt or genfromtxt;
- see http://stackoverflow.com/questions/18259393/numpy-loading-csv-too-slow-compared-to-matlab)
+def loadtxt(file, delimiter=' ', dtype=None, skiprows=0, use_pandas=True,
+ **kwargs):
+ """
+ Use pandas if it is available to load a text file
+ (significantly faster than np.loadtxt or genfromtxt; see
+ http://stackoverflow.com/questions/18259393/numpy-loading-csv-too-slow-compared-to-matlab)
Parameters
----------
@@ -261,6 +263,9 @@ def loadtxt(file, delimiter=' ', dtype=None, skiprows=0, use_pandas=True, **kwar
if isinstance(dtype, np.dtype) and 'names' not in kwargs:
kwargs['names'] = dtype.names
except:
+ if use_pandas:
+ msg = 'loadtxt: pandas is not available'
+ raise ImportError(msg)
pd = False
if use_pandas and pd:
diff --git a/flopy/utils/mfgrdfile.py b/flopy/utils/mfgrdfile.py
index 1fb956da3a..b4177c10c2 100644
--- a/flopy/utils/mfgrdfile.py
+++ b/flopy/utils/mfgrdfile.py
@@ -37,7 +37,7 @@ class MfGrdFile(FlopyBinaryData):
Notes
-----
The MfGrdFile class provides simple ways to retrieve data from binary
- MODFLOW 6 binary grid files (*.grb). The binary grid file contains data
+ MODFLOW 6 binary grid files (.grb). The binary grid file contains data
that can be used for post processing MODFLOW 6 model results.
Examples
diff --git a/flopy/utils/mflistfile.py b/flopy/utils/mflistfile.py
index 1f5f31784f..76fe784560 100644
--- a/flopy/utils/mflistfile.py
+++ b/flopy/utils/mflistfile.py
@@ -450,7 +450,7 @@ def get_dataframes(self, start_datetime='1-1-1970',diff=False):
Returns
-------
- out : panda dataframes
+ out : pandas dataframes
Pandas dataframes with the incremental and cumulative water budget
items in list file. A separate pandas dataframe is returned for the
incremental and cumulative water budget entries.
@@ -465,9 +465,8 @@ def get_dataframes(self, start_datetime='1-1-1970',diff=False):
try:
import pandas as pd
except Exception as e:
- raise Exception(
- "ListBudget.get_dataframe() error import pandas: " + \
- str(e))
+ msg = "ListBudget.get_dataframe(): requires pandas: " + str(e)
+ raise ImportError(msg)
if not self._isvalid:
return None
diff --git a/flopy/utils/modpathfile.py b/flopy/utils/modpathfile.py
index 49d14c7e3f..0a11a9f53a 100644
--- a/flopy/utils/modpathfile.py
+++ b/flopy/utils/modpathfile.py
@@ -967,6 +967,7 @@ def write_shapefile(self, endpoint_data=None,
EPSG code for writing projection (.prj) file. If this is not supplied,
the proj4 string or epgs code associated with sr will be used.
kwargs : keyword arguments to flopy.export.shapefile_utils.recarray2shp
+
"""
from ..utils.reference import SpatialReference
from ..utils.geometry import Point
diff --git a/flopy/utils/mtlistfile.py b/flopy/utils/mtlistfile.py
index 3fad151df0..07458efc4f 100644
--- a/flopy/utils/mtlistfile.py
+++ b/flopy/utils/mtlistfile.py
@@ -1,6 +1,6 @@
"""
-This is a class for reading the mass budget from a (multi-component) mt3d(usgs) run.
-Support SFT budget also
+This is a class for reading the mass budget from a (multi-component)
+mt3d(usgs) run. Also includes support for SFT budget.
"""
import os
@@ -20,8 +20,6 @@ class MtListBudget(object):
----------
file_name : str
the list file name
- timeunit : str
- the time unit to return in the recarray. (default is 'days')
Examples
@@ -33,6 +31,9 @@ class MtListBudget(object):
"""
def __init__(self, file_name):
+ """
+ Class constructor
+ """
# Set up file reading
assert os.path.exists(file_name), "file_name {0} not found".format(
@@ -47,21 +48,25 @@ def __init__(self, file_name):
# Assign the budgetkey, which should have been overriden
self.gw_budget_key = ">>>for component no."
- self.sw_budget_key = "STREAM MASS BUDGETS AT END OF TRANSPORT STEP".lower()
- self.time_key = "TOTAL ELAPSED TIME SINCE BEGINNING OF SIMULATION".lower()
+ line = 'STREAM MASS BUDGETS AT END OF TRANSPORT STEP'
+ self.sw_budget_key = line.lower()
+ line = 'TOTAL ELAPSED TIME SINCE BEGINNING OF SIMULATION'
+ self.time_key = line.lower()
return
def parse(self, forgive=True, diff=True, start_datetime=None,
time_unit='d'):
- """main entry point for parsing the list file.
+ """
+ Main entry point for parsing the list file.
Parameters
----------
forgive : bool
flag to raise exceptions when fail-to-read occurs. Default is True
diff : bool
- flag to return dataframes with 'in minus out' columns. Default is True
+ flag to return dataframes with 'in minus out' columns. Default
+ is True
start_datetime : str
str that can be parsed by pandas.to_datetime. Example: '1-1-1970'.
Default is None.
@@ -77,8 +82,9 @@ def parse(self, forgive=True, diff=True, start_datetime=None,
try:
import pandas as pd
except:
- print("must use pandas")
- return
+ msg = 'MtListBudget.parse: pandas not available'
+ raise ImportError(msg)
+
self.gw_data = {}
self.sw_data = {}
self.lcount = 0
@@ -95,7 +101,7 @@ def parse(self, forgive=True, diff=True, start_datetime=None,
except Exception as e:
warnings.warn(
"error parsing GW mass budget starting on line {0}: {1} ".
- format(self.lcount, str(e)))
+ format(self.lcount, str(e)))
break
else:
self._parse_gw(f, line)
@@ -106,7 +112,7 @@ def parse(self, forgive=True, diff=True, start_datetime=None,
except Exception as e:
warnings.warn(
"error parsing SW mass budget starting on line {0}: {1} ".
- format(self.lcount, str(e)))
+ format(self.lcount, str(e)))
break
else:
self._parse_sw(f, line)
@@ -124,7 +130,6 @@ def parse(self, forgive=True, diff=True, start_datetime=None,
df_gw = pd.DataFrame(self.gw_data)
df_gw.loc[:, "totim"] = df_gw.pop("totim_1")
-
# if cumulative:
# keep = [c for c in df_gw.columns if "_flx" not in c]
# df_gw = df_gw.loc[:,keep]
@@ -179,11 +184,12 @@ def _diff(self, df):
try:
import pandas as pd
except:
- print("must use pandas")
- return
+ msg = 'MtListBudget._diff: pandas not available'
+ raise ImportError(msg)
+
out_cols = [c for c in df.columns if "_out" in c]
in_cols = [c for c in df.columns if "_in" in c]
- out_base =[c.replace("_out", '') for c in out_cols]
+ out_base = [c.replace("_out", '') for c in out_cols]
in_base = [c.replace("_in", '') for c in in_cols]
in_dict = {ib: ic for ib, ic in zip(in_base, in_cols)}
out_dict = {ib: ic for ib, ic in zip(out_base, out_cols)}
@@ -192,7 +198,7 @@ def _diff(self, df):
out_base.update(in_base)
out_base = list(out_base)
out_base.sort()
- new = {"totim":df.totim}
+ new = {"totim": df.totim}
for col in out_base:
if col in out_dict:
odata = df.loc[:, out_dict[col]]
@@ -204,7 +210,7 @@ def _diff(self, df):
idata = 0.0
new[col] = idata - odata
- return pd.DataFrame(new,index=df.index)
+ return pd.DataFrame(new, index=df.index)
def _readline(self, f):
line = f.readline().lower()
@@ -290,8 +296,8 @@ def _parse_sw(self, f, line):
for _ in range(4):
line = self._readline(f)
if line is None:
- raise Exception(
- "EOF while reading from time step to SW budget")
+ msg = "EOF while reading from time step to SW budget"
+ raise Exception(msg)
while True:
line = self._readline(f)
if line is None:
@@ -301,9 +307,10 @@ def _parse_sw(self, f, line):
try:
item, cval, fval = self._parse_sw_line(line)
except Exception as e:
+ msg = "error parsing 'in' SW items on line {0}: {1}".format(
+ self.lcountm, str(e))
raise Exception(
- "error parsing 'in' SW items on line {0}: {1}".format(
- self.lcountm, str(e)))
+ msg)
item += '_{0}_{1}'.format(comp, 'in')
for lab, val in zip(['_cum', '_flx'], [cval, fval]):
iitem = item + lab
diff --git a/flopy/utils/observationfile.py b/flopy/utils/observationfile.py
index ebc9e1d025..59f508ae22 100644
--- a/flopy/utils/observationfile.py
+++ b/flopy/utils/observationfile.py
@@ -171,9 +171,9 @@ def get_dataframe(self, start_datetime='1-1-1970',
import pandas as pd
from ..utils.utils_def import totim_to_datetime
except Exception as e:
- raise Exception(
- "ObsFiles.get_dataframe() error import pandas: " + \
- str(e))
+ msg = "ObsFiles.get_dataframe() error import pandas: " + str(e)
+ raise ImportError(msg)
+
i0 = 0
i1 = self.data.shape[0]
if totim is not None:
diff --git a/flopy/utils/postprocessing.py b/flopy/utils/postprocessing.py
index 222e5448da..99a90a270f 100644
--- a/flopy/utils/postprocessing.py
+++ b/flopy/utils/postprocessing.py
@@ -1,17 +1,21 @@
import numpy as np
+
def get_transmissivities(heads, m,
r=None, c=None, x=None, y=None,
sctop=None, scbot=None, nodata=-999):
- """Computes transmissivity in each model layer at specified locations and open intervals.
- A saturated thickness is determined for each row, column or x, y location supplied,
- based on the open interval (sctop, scbot), if supplied, otherwise the layer tops and bottoms
- and the water table are used.
+ """
+ Computes transmissivity in each model layer at specified locations and
+ open intervals. A saturated thickness is determined for each row, column
+ or x, y location supplied, based on the open interval (sctop, scbot),
+ if supplied, otherwise the layer tops and bottoms and the water table
+ are used.
Parameters
----------
heads : 2D array OR 3D array
- numpy array of shape nlay by n locations (2D) OR complete heads array of the model for one time (3D)
+ numpy array of shape nlay by n locations (2D) OR complete heads array
+ of the model for one time (3D)
m : flopy.modflow.Modflow object
Must have dis, sr, and lpf or upw packages.
r : 1D array-like of ints, of length n locations
@@ -33,6 +37,7 @@ def get_transmissivities(heads, m,
-------
T : 2D array of same shape as heads (nlay x n locations)
Transmissivities in each layer at each location
+
"""
if r is not None and c is not None:
pass
@@ -54,9 +59,10 @@ def get_transmissivities(heads, m,
botm = m.dis.botm.array[:, r, c]
if heads.shape == (m.nlay, m.nrow, m.ncol):
- heads = heads[:,r,c]
+ heads = heads[:, r, c]
- assert heads.shape == botm.shape, 'Shape of heads array must be nlay x nhyd'
+ msg = 'Shape of heads array must be nlay x nhyd'
+ assert heads.shape == botm.shape, msg
# set open interval tops/bottoms to model top/bottom if None
if sctop is None:
@@ -108,9 +114,11 @@ def get_transmissivities(heads, m,
T = thick * hk
return T
+
def get_water_table(heads, nodata, per_idx=None):
- """Get a 2D array representing the water table
- elevation for each stress period in heads array.
+ """
+ Get a 2D array representing the water table elevation for each
+ stress period in heads array.
Parameters
----------
@@ -120,7 +128,7 @@ def get_water_table(heads, nodata, per_idx=None):
HDRY value indicating dry cells.
per_idx : int or sequence of ints
stress periods to return. If None,
- returns all stress periods (default).
+ returns all stress periods (default is None).
Returns
-------
wt : 2 or 3-D np.ndarray of water table elevations
@@ -147,8 +155,10 @@ def get_water_table(heads, nodata, per_idx=None):
wt.append(np.reshape(wt_per, (nrow, ncol)))
return np.squeeze(wt)
+
def get_saturated_thickness(heads, m, nodata, per_idx=None):
- """Calculates the saturated thickness for each cell from the heads
+ """
+ Calculates the saturated thickness for each cell from the heads
array for each stress period.
Parameters
@@ -187,8 +197,10 @@ def get_saturated_thickness(heads, m, nodata, per_idx=None):
sat_thickness.append(perthickness.filled(np.nan))
return np.squeeze(sat_thickness)
+
def get_gradients(heads, m, nodata, per_idx=None):
- """Calculates the hydraulic gradients from the heads
+ """
+ Calculates the hydraulic gradients from the heads
array for each stress period.
Parameters
@@ -227,5 +239,5 @@ def get_gradients(heads, m, nodata, per_idx=None):
dz = np.ma.array(np.diff(zcnt_per.data, axis=0), mask=diff_mask)
dh = np.ma.array(np.diff(hds.data, axis=0), mask=diff_mask)
# convert to nan-filled array, as is expected(!?)
- grad.append((dh/dz).filled(np.nan))
+ grad.append((dh / dz).filled(np.nan))
return np.squeeze(grad)
diff --git a/flopy/utils/recarray_utils.py b/flopy/utils/recarray_utils.py
index 1bd40e7b96..eabcf73d22 100644
--- a/flopy/utils/recarray_utils.py
+++ b/flopy/utils/recarray_utils.py
@@ -1,24 +1,99 @@
import numpy as np
+
def create_empty_recarray(length, dtype, default_value=0):
+ """
+ Create an empty recarray with a defined default value for floats.
+
+ Parameters
+ ----------
+ length : int
+ Shape of the empty recarray.
+ dtype : np.dtype
+ dtype of the empty recarray.
+ default_value : float
+ default value to use for floats in recarray.
+
+ Returns
+ -------
+ r : np.recarray
+ Recarray of type dtype with shape length.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> import flopy
+ >>> dtype = np.dtype([('x', np.float32), ('y', np.float32)])
+ >>> ra = flopy.utils.create_empty_recarray(10, dtype)
+
+ """
r = np.zeros(length, dtype=dtype)
- assert isinstance(dtype, np.dtype), "dtype argument must be an instance of np.dtype, not list."
+ msg = 'dtype argument must be an instance of np.dtype, not list.'
+ assert isinstance(dtype, np.dtype), msg
for name in dtype.names:
dt = dtype.fields[name][0]
if np.issubdtype(dt, np.float_):
r[name] = default_value
return r.view(np.recarray)
+
def ra_slice(ra, cols):
+ """
+ Create a slice of a recarray
+
+ Parameters
+ ----------
+ ra : np.recarray
+ recarray to extract a limited number of columns from.
+ cols : list of str
+ List of key names to extract from ra.
+
+ Returns
+ -------
+ ra_slice : np.recarray
+ Slice of ra
+
+ Examples
+ --------
+ >>> import flopy
+ >>> raslice = flopy.utils.ra_slice(ra, ['x', 'y'])
+
+
+ """
raslice = np.column_stack([ra[c] for c in cols])
dtype = [(str(d[0]), str(d[1])) for d in ra.dtype.descr if d[0] in cols]
return np.array([tuple(r) for r in raslice],
dtype=dtype).view(np.recarray)
+
def recarray(array, dtype):
- # handle sequences of lists
- # (recarrays must be constructed from tuples)
+ """
+ Convert a list of lists or tuples to a recarray.
+
+ Parameters
+ ----------
+ array : list of lists
+ list of lists containing data to convert to a recarray. The number of
+ entries in each list in the list must be the same.
+ dtype : np.dtype
+ dtype of the array data
+
+ Returns
+ -------
+ r : np.recarray
+ Recarray of type dtype with shape equal to the length of array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> import flopy
+ >>> dtype = np.dtype([('x', np.float32), ('y', np.float32)])
+ >>> arr = [(1., 2.), (10., 20.), (100., 200.)]
+ >>> ra = flopy.utils.recarray(arr, dtype)
+
+ """
array = np.atleast_2d(array)
+ # convert each entry of the list to a tuple
if not isinstance(array[0], tuple):
array = list(map(tuple, array))
- return np.array(array, dtype=dtype).view(np.recarray)
\ No newline at end of file
+ return np.array(array, dtype=dtype).view(np.recarray)
diff --git a/flopy/utils/reference.py b/flopy/utils/reference.py
index 80a4291f8f..00487690fc 100755
--- a/flopy/utils/reference.py
+++ b/flopy/utils/reference.py
@@ -2040,8 +2040,9 @@ def _getprojcs_unit(self):
def getprj(epsg, addlocalreference=True, text='esriwkt'):
- """Gets projection file (.prj) text for given epsg code from spatialreference.org
- See: https://www.epsg-registry.org/
+ """
+ Gets projection file (.prj) text for given epsg code from
+ spatialreference.org
Parameters
----------
@@ -2051,6 +2052,10 @@ def getprj(epsg, addlocalreference=True, text='esriwkt'):
adds the projection file text associated with epsg to a local
database, epsgref.py, located in site-packages.
+ References
+ ----------
+ https://www.epsg-registry.org/
+
Returns
-------
prj : str
diff --git a/flopy/utils/sfroutputfile.py b/flopy/utils/sfroutputfile.py
index a7906a347e..105a352524 100644
--- a/flopy/utils/sfroutputfile.py
+++ b/flopy/utils/sfroutputfile.py
@@ -1,5 +1,6 @@
import numpy as np
+
class SfrFile():
"""
Read SFR package results from text file (ISTCB2 > 0)
@@ -31,8 +32,6 @@ class SfrFile():
"""
-
-
# non-float dtypes (default is float)
dtypes = {"layer": int,
"row": int,
@@ -56,16 +55,26 @@ def __init__(self, filename, geometries=None, verbose=False):
self.filename = filename
self.sr, self.ncol = self.get_skiprows_ncols()
self.names = ["layer", "row", "column", "segment", "reach",
- "Qin", "Qaquifer", "Qout", "Qovr",
- "Qprecip", "Qet",
- "stage", "depth", "width", "Cond"]
- self._set_names() # ensure correct number of column names
+ "Qin", "Qaquifer", "Qout", "Qovr",
+ "Qprecip", "Qet",
+ "stage", "depth", "width", "Cond"]
+ self._set_names() # ensure correct number of column names
self.times = self.get_times()
- self.geoms = None # not implemented yet
+ self.geoms = None # not implemented yet
self._df = None
def get_skiprows_ncols(self):
- """Get the number of rows to skip at the top."""
+ """
+ Get the number of rows to skip at the top of the SFR output file.
+
+ Returns
+ -------
+ i : int
+ Number of lines to skip at the top of the SFR output file
+ ncols : int
+ Number of columns in the SFR output file
+
+ """
with open(self.filename) as input:
for i, line in enumerate(input):
line = line.strip().split()
@@ -74,7 +83,15 @@ def get_skiprows_ncols(self):
return i, ncols
def get_times(self):
- """Parse the stress period/timestep headers."""
+ """
+ Parse the stress period/timestep headers.
+
+ Returns
+ -------
+ kstpkper : tuple
+ list of kstp, kper tuples
+
+ """
kstpkper = []
with open(self.filename) as input:
for line in input:
@@ -85,12 +102,19 @@ def get_times(self):
return kstpkper
def _set_names(self):
- """Pad column names so that correct number is used
- (otherwise Pandas read_csv may drop columns)"""
+ """
+ Pad column names so that correct number is used (otherwise Pandas
+ read_csv may drop columns)
+
+ Returns
+ -------
+ None
+
+ """
if len(self.names) < self.ncol:
n = len(self.names)
for i in range(n, self.ncol):
- self.names.append('col{}'.format(i+1))
+ self.names.append('col{}'.format(i + 1))
@property
def df(self):
@@ -100,7 +124,15 @@ def df(self):
@staticmethod
def get_nstrm(df):
- """Get the number of SFR cells from the results dataframe."""
+ """
+ Get the number of SFR cells from the results dataframe.
+
+ Returns
+ -------
+ nrch : int
+ Number of SFR cells
+
+ """
wherereach1 = np.where((df.segment == 1) & (df.reach == 1))[0]
if len(wherereach1) == 1:
return len(df)
@@ -108,12 +140,19 @@ def get_nstrm(df):
return wherereach1[1]
def get_dataframe(self):
- """Read the whole text file into a pandas dataframe."""
+ """
+ Read the whole text file into a pandas dataframe.
+
+ Returns
+ -------
+ df : pandas dataframe
+ SFR output as a pandas dataframe
+ """
df = self.pd.read_csv(self.filename, delim_whitespace=True,
- header=None, names=self.names,
- error_bad_lines=False,
- skiprows=self.sr, low_memory=False)
+ header=None, names=self.names,
+ error_bad_lines=False,
+ skiprows=self.sr, low_memory=False)
# drop text between stress periods; convert to numeric
df['layer'] = self.pd.to_numeric(df.layer, errors='coerce')
df.dropna(axis=0, inplace=True)
@@ -136,7 +175,7 @@ def get_dataframe(self):
df['kstpkper'] = dftimes
df['k'] = df['layer'] - 1
df['i'] = df['row'] - 1
- df['j'] = df['column'] -1
+ df['j'] = df['column'] - 1
if self.geoms is not None:
geoms = self.geoms * self.nstrm
@@ -145,10 +184,25 @@ def get_dataframe(self):
return df
def _get_result(self, segment, reach):
- return self.df.loc[(self.df.segment == segment) & (self.df.reach == reach)].copy()
+ """
+
+ Parameters
+ ----------
+ segment : int or sequence of ints
+ Segment number for each location.
+ reach : int or sequence of ints
+ Reach number for each location
+
+ Returns
+ -------
+
+ """
+ return self.df.loc[
+ (self.df.segment == segment) & (self.df.reach == reach)].copy()
def get_results(self, segment, reach):
- """Get results for a single reach or sequence of segments and reaches.
+ """
+ Get results for a single reach or sequence of segments and reaches.
Parameters
----------
@@ -176,7 +230,3 @@ def get_results(self, segment, reach):
else:
print('No results for segment {}, reach {}!'.format(s, r))
return results
-
-
-
-
diff --git a/flopy/utils/util_array.py b/flopy/utils/util_array.py
index 8a62cd95e7..bef6840479 100644
--- a/flopy/utils/util_array.py
+++ b/flopy/utils/util_array.py
@@ -25,7 +25,6 @@ class ArrayFormat(object):
Parameters
----------
u2d : Util2d instance
-
python : str (optional)
python-style output format descriptor e.g. {0:15.6e}
fortran : str (optional)
@@ -73,7 +72,7 @@ class ArrayFormat(object):
"""
- def __init__(self, u2d, python=None, fortran=None,array_free_format=None):
+ def __init__(self, u2d, python=None, fortran=None, array_free_format=None):
assert isinstance(u2d, Util2d), "ArrayFormat only supports Util2d," + \
"not {0}".format(type(u2d))
@@ -215,7 +214,7 @@ def __setattr__(self, key, value):
if self.dtype == np.float32 and width < self.decimal:
raise Exception("width cannot be less than decimal")
elif self.dtype == np.float32 and \
- width < self.default_float_width:
+ width < self.default_float_width:
print("ArrayFormat warning:setting width less " +
"than default of {0}".format(self.default_float_width))
self._width = width
@@ -504,9 +503,9 @@ def __init__(self, model, shape, dtype, value, name,
self.array_free_format = array_free_format
if isinstance(value, Util3d):
for attr in value.__dict__.items():
- setattr(self,attr[0], attr[1])
+ setattr(self, attr[0], attr[1])
self.model = model
- self.array_free_format=array_free_format
+ self.array_free_format = array_free_format
for i, u2d in enumerate(self.util_2ds):
self.util_2ds[i] = Util2d(model, u2d.shape, u2d.dtype,
u2d._array, name=u2d.name,
@@ -547,7 +546,6 @@ def __init__(self, model, shape, dtype, value, name,
self.iprn = iprn
self.locat = locat
-
self.ext_filename_base = []
if model.external_path is not None:
for k in range(shape[0]):
@@ -579,7 +577,7 @@ def __setattr__(self, key, value):
for u2d in self.util_2ds:
u2d.format = ArrayFormat(u2d, fortran=value,
array_free_format=self.array_free_format)
- super(Util3d,self).__setattr__("fmtin",value)
+ super(Util3d, self).__setattr__("fmtin", value)
elif hasattr(self, "util_2ds") and key == "how":
for u2d in self.util_2ds:
u2d.how = value
@@ -617,8 +615,9 @@ def to_shapefile(self, filename):
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.lpf.hk.to_shapefile('test_hk.shp')
"""
- warn("Deprecation warning: to_shapefile() is deprecated. use .export()",
- DeprecationWarning)
+ warn(
+ "Deprecation warning: to_shapefile() is deprecated. use .export()",
+ DeprecationWarning)
# from flopy.utils.flopy_io import write_grid_shapefile, shape_attr_name
#
@@ -974,7 +973,7 @@ def __init__(self, model, shape, dtype, value, name, fmtin=None,
self.cnstst = cnstnt
self.iprn = iprn
self.locat = locat
- self.array_free_format=array_free_format
+ self.array_free_format = array_free_format
self.transient_3ds = self.build_transient_sequence()
return
@@ -1098,7 +1097,7 @@ def __get_3d_instance(self, kper, arg):
name = '{}_period{}'.format(self.name_base, kper + 1)
u3d = Util3d(self.model, self.shape, self.dtype, arg,
fmtin=self.fmtin, name=name,
-# ext_filename=ext_filename,
+ # ext_filename=ext_filename,
locat=self.locat,
array_free_format=self.array_free_format)
return u3d
@@ -1174,7 +1173,7 @@ class Transient2d(object):
def __init__(self, model, shape, dtype, value, name, fmtin=None,
cnstnt=1.0, iprn=-1, ext_filename=None, locat=None,
- bin=False,array_free_format=None):
+ bin=False, array_free_format=None):
if isinstance(value, Transient2d):
for attr in value.__dict__.items():
@@ -1313,8 +1312,9 @@ def to_shapefile(self, filename):
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.rch.rech.as_shapefile('test_rech.shp')
"""
- warn("Deprecation warning: to_shapefile() is deprecated. use .export()",
- DeprecationWarning)
+ warn(
+ "Deprecation warning: to_shapefile() is deprecated. use .export()",
+ DeprecationWarning)
# from flopy.utils.flopy_io import write_grid_shapefile, shape_attr_name
#
@@ -1485,7 +1485,7 @@ def export(self, f, **kwargs):
def get_kper_entry(self, kper):
"""
- get the file entry info for a given kper
+ Get the file entry info for a given kper
returns (itmp,file entry string from Util2d)
"""
if kper in self.transient_2ds:
@@ -1747,7 +1747,7 @@ def _decide_how(self):
self._how = "constant"
# if a filename was passed in or external path was set
elif self.model.external_path is not None or \
- self.vtype == str:
+ self.vtype == str:
if self.format.array_free_format:
self._how = "openclose"
else:
@@ -1866,8 +1866,9 @@ def to_shapefile(self, filename):
>>> ml.dis.top.as_shapefile('test_top.shp')
"""
- warn("Deprecation warning: to_shapefile() is deprecated. use .export()",
- DeprecationWarning)
+ warn(
+ "Deprecation warning: to_shapefile() is deprecated. use .export()",
+ DeprecationWarning)
# from flopy.utils.flopy_io import write_grid_shapefile, shape_attr_name
# name = shape_attr_name(self.name, keep_layer=True)
# write_grid_shapefile(filename, self.model.dis.sr, {name: self.array})
@@ -2053,7 +2054,7 @@ def _get_fixed_cr(self, locat, value=None):
value = self.cnstnt
if self.format.binary:
if locat is None:
- raise Exception("Util2d._get_fixed_cr(): locat is None but"+\
+ raise Exception("Util2d._get_fixed_cr(): locat is None but" + \
"format is binary")
if not self.format.array_free_format:
locat = -1 * np.abs(locat)
@@ -2078,14 +2079,15 @@ def _get_fixed_cr(self, locat, value=None):
def get_internal_cr(self):
if self.format.array_free_format:
cr = 'INTERNAL {0:15} {1:>10s} {2:2.0f} #{3:<30s}\n' \
- .format(self.cnstnt_str, self.format.fortran, self.iprn, self.name)
+ .format(self.cnstnt_str, self.format.fortran, self.iprn,
+ self.name)
return cr
else:
return self._get_fixed_cr(self.locat)
@property
def cnstnt_str(self):
- if isinstance(self.cnstnt,str):
+ if isinstance(self.cnstnt, str):
return self.cnstnt
else:
return "{0:15.6G}".format(self.cnstnt)
@@ -2099,7 +2101,7 @@ def get_openclose_cr(self):
def get_external_cr(self):
locat = self.model.next_ext_unit()
- #if self.format.binary:
+ # if self.format.binary:
# locat = -1 * np.abs(locat)
self.model.add_external(self.model_file_path, locat,
self.format.binary)
@@ -2148,7 +2150,7 @@ def get_file_entry(self, how=None):
elif how == "external" or how == "openclose":
if how == "openclose":
assert self.format.array_free_format, "Util2d error: 'how' is openclose," + \
- "but model doesn't support free fmt"
+ "but model doesn't support free fmt"
# write a file if needed
if self.vtype != str:
@@ -2233,7 +2235,7 @@ def array(self):
model - with the effects of the control record multiplier applied.
"""
- if isinstance(self.cnstnt,str):
+ if isinstance(self.cnstnt, str):
print("WARNING: cnstnt is str for {0}".format(self.name))
return self._array.astype(self.dtype)
if isinstance(self.cnstnt, (int, np.int32)):
@@ -2301,7 +2303,7 @@ def load_block(shape, file_in, dtype):
if len(shape) != 2:
raise ValueError(
'Util2d.load_block(): expected 2 dimensions, found shape {0}'
- .format(shape))
+ .format(shape))
nrow, ncol = shape
data = np.ma.zeros(shape, dtype=dtype)
data.mask = True
@@ -2322,7 +2324,6 @@ def load_block(shape, file_in, dtype):
warn('Util2d.load_block(): blocks do not cover full array')
return data.data
-
@staticmethod
def load_txt(shape, file_in, dtype, fmtin):
"""Load formatted file to a 1-D or 2-D array
@@ -2354,7 +2355,7 @@ def load_txt(shape, file_in, dtype, fmtin):
else:
raise ValueError(
'Util2d.load_txt(): expected 1 or 2 dimensions, found shape {0}'
- .format(shape))
+ .format(shape))
if not hasattr(file_in, 'read'):
file_in = open(file_in, 'r')
npl, fmt, width, decimal = ArrayFormat.decode_fortran_descriptor(fmtin)
@@ -2389,7 +2390,8 @@ def load_txt(shape, file_in, dtype, fmtin):
data = np.fromiter(items, dtype=dtype, count=num_items)
if data.size != num_items:
raise ValueError('Util2d.load_txt(): expected array size {0},'
- ' but found size {1}'.format(num_items, data.size))
+ ' but found size {1}'.format(num_items,
+ data.size))
return data.reshape(shape)
@staticmethod
@@ -2508,7 +2510,8 @@ def load_bin(shape, file_in, dtype, bintype=None):
data = np.fromfile(file_in, dtype=dtype, count=num_items)
if data.size != num_items:
raise ValueError('Util2d.load_bin(): expected array size {0},'
- ' but found size {1}'.format(num_items, data.size))
+ ' but found size {1}'.format(num_items,
+ data.size))
return header_data, data.reshape(shape)
@staticmethod
@@ -2600,7 +2603,7 @@ def parse_value(self, value):
@staticmethod
def load(f_handle, model, shape, dtype, name, ext_unit_dict=None,
- array_free_format=None,array_format="modflow"):
+ array_free_format=None, array_format="modflow"):
"""
functionality to load Util2d instance from an existing
model input file.
@@ -2621,8 +2624,8 @@ def load(f_handle, model, shape, dtype, name, ext_unit_dict=None,
break
# Allows for special MT3D array reader
- #array_format = None
- #if hasattr(model, 'array_format'):
+ # array_format = None
+ # if hasattr(model, 'array_format'):
# array_format = model.array_format
cr_dict = Util2d.parse_control_record(f_handle.readline(),
@@ -2739,8 +2742,6 @@ def parse_control_record(line, current_unit=None, dtype=np.float32,
except:
pass
-
-
nunit = int(raw[1])
if isfloat:
cnstnt = np.float(raw[2].lower().replace('d', 'e'))
@@ -2770,7 +2771,7 @@ def parse_control_record(line, current_unit=None, dtype=np.float32,
cnstnt = np.int(line[10:20].strip())
else:
cnstnt = 0
- #if cnstnt == 0:
+ # if cnstnt == 0:
# cnstnt = 1
if locat != 0:
if len(line) >= 40:
diff --git a/flopy/utils/util_list.py b/flopy/utils/util_list.py
index 4857664f00..559db3b48b 100644
--- a/flopy/utils/util_list.py
+++ b/flopy/utils/util_list.py
@@ -382,7 +382,8 @@ def __cast_ndarray(self, kper, d):
self.__vtype[kper] = np.recarray
def get_dataframe(self, squeeze=True):
- """Cast recarrays for stress periods into single
+ """
+ Cast recarrays for stress periods into single
dataframe containing all stress periods.
Parameters
@@ -406,8 +407,8 @@ def get_dataframe(self, squeeze=True):
try:
import pandas as pd
except Exception as e:
- print("this feature requires pandas")
- return None
+ msg = 'MfList.get_dataframe() requires pandas'
+ raise ImportError(msg)
# make a dataframe of all data for all stress periods
names = ['k', 'i', 'j']
@@ -431,7 +432,8 @@ def get_dataframe(self, squeeze=True):
# add an empty dataframe if a stress period is
# set to 0 (e.g. no pumping during a predevelopment
# period)
- columns = names + list(['{}{}'.format(c, per) for c in varnames])
+ columns = names + list(['{}{}'.format(c, per)
+ for c in varnames])
dfi = pd.DataFrame(data=None, columns=columns)
dfi = dfi.set_index(names)
else:
diff --git a/flopy/utils/zonbud.py b/flopy/utils/zonbud.py
index 38dacd0748..2673a7c022 100644
--- a/flopy/utils/zonbud.py
+++ b/flopy/utils/zonbud.py
@@ -6,11 +6,6 @@
from collections import OrderedDict
from ..utils.utils_def import totim_to_datetime
-try:
- import pandas as pd
-except:
- pass
-
class ZoneBudget(object):
"""
@@ -31,9 +26,13 @@ class ZoneBudget(object):
aliases : dict
A dictionary with key, value pairs of zones and aliases. Replaces
the corresponding record and field names with the aliases provided.
- NOTE: When using this option in conjunction with a list of zones,
- the zone(s) passed may either be all strings (aliases), all
- integers, or mixed.
+ When using this option in conjunction with a list of zones, the
+ zone(s) passed may either be all strings (aliases), all integers,
+ or mixed.
+
+ Returns
+ -------
+ None
Examples
--------
@@ -57,9 +56,12 @@ def __init__(self, cbc_file, z, kstpkper=None, totim=None, aliases=None,
'Cannot load cell budget file: {}.'.format(cbc_file))
if isinstance(z, np.ndarray):
- assert np.issubdtype(z.dtype, np.integer), 'Zones dtype must be integer'
+ assert np.issubdtype(z.dtype,
+ np.integer), 'Zones dtype must be integer'
else:
- raise Exception('Please pass zones as a numpy ndarray of (positive) integers. {}'.format(z.dtype))
+ raise Exception(
+ 'Please pass zones as a numpy ndarray of (positive) integers. {}'.format(
+ z.dtype))
# Check for negative zone values
for zi in np.unique(z):
@@ -117,7 +119,8 @@ def __init__(self, cbc_file, z, kstpkper=None, totim=None, aliases=None,
# Check dimensions of input zone array
s = 'Row/col dimensions of zone array {}' \
- ' do not match model row/col dimensions {}'.format(z.shape, self.cbc_shape)
+ ' do not match model row/col dimensions {}'.format(z.shape,
+ self.cbc_shape)
assert z.shape[-2] == self.nrow and \
z.shape[-1] == self.ncol, s
@@ -131,14 +134,14 @@ def __init__(self, cbc_file, z, kstpkper=None, totim=None, aliases=None,
izone[:] = z[0, :, :]
else:
raise Exception(
- 'Shape of the zone array is not recognized: {}'.format(
- z.shape))
+ 'Shape of the zone array is not recognized: {}'.format(
+ z.shape))
self.izone = izone
self.allzones = [z for z in np.unique(self.izone)]
self._zonenamedict = OrderedDict([(z, 'ZONE_{}'.format(z))
- for z in self.allzones if
- z != 0])
+ for z in self.allzones if
+ z != 0])
if aliases is not None:
assert isinstance(aliases,
@@ -185,11 +188,13 @@ def __init__(self, cbc_file, z, kstpkper=None, totim=None, aliases=None,
array_list = []
if self.kstpkper is not None:
for kk in self.kstpkper:
- recordarray = self._initialize_budget_recordarray(kstpkper=kk, totim=None)
+ recordarray = self._initialize_budget_recordarray(kstpkper=kk,
+ totim=None)
array_list.append(recordarray)
elif self.totim is not None:
for t in self.totim:
- recordarray = self._initialize_budget_recordarray(kstpkper=None, totim=t)
+ recordarray = self._initialize_budget_recordarray(
+ kstpkper=None, totim=t)
array_list.append(recordarray)
self._budget = np.concatenate(array_list, axis=0)
@@ -212,6 +217,18 @@ def __init__(self, cbc_file, z, kstpkper=None, totim=None, aliases=None,
return
def get_model_shape(self):
+ """
+
+ Returns
+ -------
+ nlay : int
+ Number of layers
+ nrow : int
+ Number of rows
+ ncol : int
+ Number of columns
+
+ """
return self.nlay, self.nrow, self.ncol
def get_record_names(self, stripped=False):
@@ -341,6 +358,7 @@ def get_dataframes(self, start_datetime=None, timeunit='D',
index_key='totim', names=None, zones=None, net=False):
"""
Get pandas dataframes.
+
Parameters
----------
@@ -374,9 +392,9 @@ def get_dataframes(self, start_datetime=None, timeunit='D',
try:
import pandas as pd
except Exception as e:
- raise Exception(
- "ZoneBudget.get_dataframes() error import pandas: " + \
- str(e))
+ msg = "ZoneBudget.get_dataframes() error import pandas: " + str(e)
+ raise ImportError(msg)
+
valid_index_keys = ['totim', 'kstpkper']
assert index_key in valid_index_keys, 'index_key "{}" is not valid.'.format(
index_key)
@@ -411,7 +429,7 @@ def get_dataframes(self, start_datetime=None, timeunit='D',
index_cols = ['totim', 'name']
elif index_key == 'kstpkper':
index_cols = ['time_step', 'stress_period', 'name']
- df = df.set_index(index_cols)#.sort_index(level=0)
+ df = df.set_index(index_cols) # .sort_index(level=0)
if zones is not None:
keep_cols = zones
else:
@@ -447,6 +465,18 @@ def _compute_budget(self, kstpkper=None, totim=None):
"""
Creates a budget for the specified zone array. This function only supports the
use of a single time step/stress period or time.
+
+ Parameters
+ ----------
+ kstpkper : tuple
+ Tuple of kstp and kper to compute budget for (default is None).
+ totim : float
+ Totim to compute budget for (default is None).
+
+ Returns
+ -------
+ None
+
"""
# Initialize an array to track where the constant head cells
# are located.
@@ -491,8 +521,17 @@ def _compute_budget(self, kstpkper=None, totim=None):
self._compute_mass_balance(kstpkper, totim)
return
-
+
def _get_internal_flow_record_names(self):
+ """
+ Get internal flow record names
+
+ Returns
+ -------
+ iflow_recnames : np.recarray
+ recarray of internal flow terms
+
+ """
iflow_recnames = OrderedDict([(0, 'ZONE_0')])
for z, a in iter(self._zonenamedict.items()):
iflow_recnames[z] = '{}'.format(a)
@@ -501,9 +540,25 @@ def _get_internal_flow_record_names(self):
return iflow_recnames
def _add_empty_record(self, recordarray, recname, kstpkper=None,
- totim=None):
- # Builds empty records based on the specified flow direction and
- # record name for the given list of zones.
+ totim=None):
+ """
+ Build an empty record based on the specified flow direction and
+ record name for the given list of zones.
+
+ Parameters
+ ----------
+ recordarray :
+ recname :
+ kstpkper : tuple
+ Tuple of kstp and kper to compute budget for (default is None).
+ totim : float
+ Totim to compute budget for (default is None).
+
+ Returns
+ -------
+ recordarray : np.recarray
+
+ """
if kstpkper is not None:
if len(self.cbc_times) > 0:
totim = self.cbc_times[self.cbc_kstpkper.index(kstpkper)]
@@ -522,75 +577,125 @@ def _add_empty_record(self, recordarray, recname, kstpkper=None,
return recordarray
def _initialize_budget_recordarray(self, kstpkper=None, totim=None):
- # Initialize the budget record array which will store all of the
- # fluxes in the cell-budget file.
+ """
+ Initialize the budget record array which will store all of the
+ fluxes in the cell-budget file.
+
+ Parameters
+ ----------
+ kstpkper : tuple
+ Tuple of kstp and kper to compute budget for (default is None).
+ totim : float
+ Totim to compute budget for (default is None).
+
+ Returns
+ -------
+
+ """
# Create empty array for the budget terms.
dtype_list = [('totim', '= 2:
@@ -733,7 +820,8 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
# ZONE 4 TO 3 IS THE NEGATIVE OF FLOW FROM 3 TO 4.
# 1ST, CALCULATE FLOW BETWEEN NODE J,I,K AND J-1,I,K
- k, i, j = np.where(self.izone[:, :, 1:] > self.izone[:, :, :-1])
+ k, i, j = np.where(
+ self.izone[:, :, 1:] > self.izone[:, :, :-1])
# Adjust column values to account for the starting position of "nz"
j += 1
@@ -757,7 +845,8 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
fzi, tzi, fi = sum_flux_tuples(nzl[idx],
nz[idx],
q[idx])
- self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi),
+ kstpkper, totim)
# Get indices where flow face values are negative (flow into higher zone)
# Don't include CH to CH flow (can occur if CHTOCH option is used)
@@ -768,10 +857,12 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
fzi, tzi, fi = sum_flux_tuples(nz[idx],
nzl[idx],
q[idx])
- self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi),
+ kstpkper, totim)
# FLOW BETWEEN NODE J,I,K AND J+1,I,K
- k, i, j = np.where(self.izone[:, :, :-1] > self.izone[:, :, 1:])
+ k, i, j = np.where(
+ self.izone[:, :, :-1] > self.izone[:, :, 1:])
# Define the zone from which flow is coming
nz = self.izone[k, i, j]
@@ -792,7 +883,8 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
fzi, tzi, fi = sum_flux_tuples(nz[idx],
nzr[idx],
q[idx])
- self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi),
+ kstpkper, totim)
# Get indices where flow face values are negative (flow into higher zone)
# Don't include CH to CH flow (can occur if CHTOCH option is used)
@@ -803,7 +895,8 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
fzi, tzi, fi = sum_flux_tuples(nzr[idx],
nz[idx],
q[idx])
- self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi),
+ kstpkper, totim)
# CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION
k, i, j = np.where(ich == 1)
@@ -821,7 +914,8 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
f = fi[tzi != 0]
fz = np.array(['TO_CONSTANT_HEAD'] * len(tz))
tz = np.array([self._zonenamedict[z] for z in tzi[tzi != 0]])
- self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper,
+ totim)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1)))
@@ -832,7 +926,8 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
f = fi[fzi != 0]
fz = np.array(['FROM_CONSTANT_HEAD'] * len(fz))
tz = np.array([self._zonenamedict[z] for z in tzi[tzi != 0]])
- self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper,
+ totim)
k, i, j = np.where(ich == 1)
k, i, j = k[j < self.ncol - 1], i[j < self.ncol - 1], j[
@@ -850,7 +945,8 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
f = fi[tzi != 0]
fz = np.array(['FROM_CONSTANT_HEAD'] * len(tz))
tz = np.array([self._zonenamedict[z] for z in tzi[tzi != 0]])
- self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper,
+ totim)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1)))
@@ -861,7 +957,8 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
f = fi[fzi != 0]
fz = np.array(['TO_CONSTANT_HEAD'] * len(fz))
tz = np.array([self._zonenamedict[z] for z in tzi[tzi != 0]])
- self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper,
+ totim)
except Exception as e:
print(e)
@@ -870,72 +967,17 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
"""
- C
- C-----"FLOW FRONT FACE"
- C-----CALCULATE FLOW BETWEEN NODE J,I,K AND J,I-1,K
- 400 IF(NROW.LT.2) RETURN
- DO 440 K=1,NLAY
- DO 440 I=2,NROW
- DO 440 J=1,NCOL
- NZ=IZONE(J,I,K)
- IA=I-1
- NZA=IZONE(J,IA,K)
- IF(NZA.LE.NZ) GO TO 440
- C Don't include CH to CH flow (can occur if CHTOCH option is used)
- IF(ICH(J,I,K).EQ.1 .AND. ICH(J,I-1,K).EQ.1) GO TO 440
- DBUFF=BUFFD(J,IA,K)
- IF(DBUFF.LT.DZERO) THEN
- VBZNFL(2,NZ,NZA)=VBZNFL(2,NZ,NZA)-DBUFF
- ELSE
- VBZNFL(1,NZ,NZA)=VBZNFL(1,NZ,NZA)+DBUFF
- END IF
- 440 CONTINUE
- C
- C-----CALCULATE FLOW BETWEEN NODE J,I,K AND J,I+1,K
- DO 470 K=1,NLAY
- DO 470 I=1,NROW-1
- DO 470 J=1,NCOL
- NZ=IZONE(J,I,K)
- IB=I+1
- NZB=IZONE(J,IB,K)
- IF(NZB.LE.NZ) GO TO 470
- C Don't include CH to CH flow (can occur if CHTOCH option is used)
- IF(ICH(J,I,K).EQ.1 .AND. ICH(J,I+1,K).EQ.1) GO TO 470
- DBUFF=BUFFD(J,I,K)
- IF(DBUFF.LT.DZERO) THEN
- VBZNFL(1,NZ,NZB)=VBZNFL(1,NZ,NZB)-DBUFF
- ELSE
- VBZNFL(2,NZ,NZB)=VBZNFL(2,NZ,NZB)+DBUFF
- END IF
- 470 CONTINUE
- C
- C-----CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION
- DO 495 K=1,NLAY
- DO 495 I=1,NROW
- DO 495 J=1,NCOL
- IF(ICH(J,I,K).EQ.0) GO TO 495
- NZ=IZONE(J,I,K)
- IF(NZ.EQ.0) GO TO 495
- IF(I.EQ.NROW) GO TO 480
- IF(ICH(J,I+1,K).EQ.1) GO TO 480
- DBUFF=BUFFD(J,I,K)
- IF(DBUFF.EQ.DZERO) THEN
- ELSE IF(DBUFF.LT.DZERO) THEN
- VBVL(2,MSUMCH,NZ)=VBVL(2,MSUMCH,NZ)-DBUFF
- ELSE
- VBVL(1,MSUMCH,NZ)=VBVL(1,MSUMCH,NZ)+DBUFF
- END IF
- 480 IF(I.EQ.1) GO TO 495
- IF(ICH(J,I-1,K).EQ.1) GO TO 495
- DBUFF=BUFFD(J,I-1,K)
- IF(DBUFF.EQ.DZERO) THEN
- ELSE IF(DBUFF.LT.DZERO) THEN
- VBVL(1,MSUMCH,NZ)=VBVL(1,MSUMCH,NZ)-DBUFF
- ELSE
- VBVL(2,MSUMCH,NZ)=VBVL(2,MSUMCH,NZ)+DBUFF
- END IF
- 495 CONTINUE
- RETURN
+
+ Parameters
+ ----------
+ recname
+ ich
+ kstpkper
+ totim
+
+ Returns
+ -------
+
"""
try:
if self.nrow >= 2:
@@ -945,7 +987,8 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
# "FLOW FRONT FACE"
# CALCULATE FLOW BETWEEN NODE J,I,K AND J,I-1,K
- k, i, j = np.where(self.izone[:, 1:, :] < self.izone[:, :-1, :])
+ k, i, j = np.where(
+ self.izone[:, 1:, :] < self.izone[:, :-1, :])
i += 1
ia = i - 1
nza = self.izone[k, ia, j]
@@ -956,17 +999,20 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
fzi, tzi, fi = sum_flux_tuples(nza[idx],
nz[idx],
q[idx])
- self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi),
+ kstpkper, totim)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1)))
fzi, tzi, fi = sum_flux_tuples(nz[idx],
nza[idx],
q[idx])
- self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi),
+ kstpkper, totim)
# CALCULATE FLOW BETWEEN NODE J,I,K AND J,I+1,K.
- k, i, j = np.where(self.izone[:, :-1, :] < self.izone[:, 1:, :])
+ k, i, j = np.where(
+ self.izone[:, :-1, :] < self.izone[:, 1:, :])
nz = self.izone[k, i, j]
ib = i + 1
nzb = self.izone[k, ib, j]
@@ -976,14 +1022,16 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
fzi, tzi, fi = sum_flux_tuples(nz[idx],
nzb[idx],
q[idx])
- self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi),
+ kstpkper, totim)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1)))
fzi, tzi, fi = sum_flux_tuples(nzb[idx],
nz[idx],
q[idx])
- self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi),
+ kstpkper, totim)
# CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION
k, i, j = np.where(ich == 1)
@@ -1001,7 +1049,8 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
f = fi[tzi != 0]
fz = np.array(['TO_CONSTANT_HEAD'] * len(tz))
tz = np.array([self._zonenamedict[z] for z in tzi[tzi != 0]])
- self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper,
+ totim)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1)))
@@ -1012,7 +1061,8 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
f = fi[fzi != 0]
fz = np.array(['FROM_CONSTANT_HEAD'] * len(fz))
tz = np.array([self._zonenamedict[z] for z in tzi[tzi != 0]])
- self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper,
+ totim)
k, i, j = np.where(ich == 1)
k, i, j = k[i < self.nrow - 1], i[i < self.nrow - 1], j[
@@ -1030,7 +1080,8 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
f = fi[tzi != 0]
fz = np.array(['FROM_CONSTANT_HEAD'] * len(tz))
tz = np.array([self._zonenamedict[z] for z in tzi[tzi != 0]])
- self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper,
+ totim)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1)))
@@ -1041,7 +1092,8 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
f = fi[fzi != 0]
fz = np.array(['TO_CONSTANT_HEAD'] * len(fz))
tz = np.array([self._zonenamedict[z] for z in tzi[tzi != 0]])
- self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper,
+ totim)
except Exception as e:
print(e)
@@ -1050,72 +1102,17 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
"""
- C
- C-----"FLOW LOWER FACE"
- C-----CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K-1
- 500 IF(NLAY.LT.2) RETURN
- DO 540 K=2,NLAY
- DO 540 I=1,NROW
- DO 540 J=1,NCOL
- NZ=IZONE(J,I,K)
- KA=K-1
- NZA=IZONE(J,I,KA)
- IF(NZA.LE.NZ) GO TO 540
- C Don't include CH to CH flow (can occur if CHTOCH option is used)
- IF(ICH(J,I,K).EQ.1 .AND. ICH(J,I,K-1).EQ.1) GO TO 540
- DBUFF=BUFFD(J,I,KA)
- IF(DBUFF.LT.DZERO) THEN
- VBZNFL(2,NZ,NZA)=VBZNFL(2,NZ,NZA)-DBUFF
- ELSE
- VBZNFL(1,NZ,NZA)=VBZNFL(1,NZ,NZA)+DBUFF
- END IF
- 540 CONTINUE
- C
- C-----CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K+1
- DO 570 K=1,NLAY-1
- DO 570 I=1,NROW
- DO 570 J=1,NCOL
- NZ=IZONE(J,I,K)
- KB=K+1
- NZB=IZONE(J,I,KB)
- IF(NZB.LE.NZ) GO TO 570
- C Don't include CH to CH flow (can occur if CHTOCH option is used)
- IF(ICH(J,I,K).EQ.1 .AND. ICH(J,I,K+1).EQ.1) GO TO 570
- DBUFF=BUFFD(J,I,K)
- IF(DBUFF.LT.DZERO) THEN
- VBZNFL(1,NZ,NZB)=VBZNFL(1,NZ,NZB)-DBUFF
- ELSE
- VBZNFL(2,NZ,NZB)=VBZNFL(2,NZ,NZB)+DBUFF
- END IF
- 570 CONTINUE
- C
- C-----CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION
- DO 595 K=1,NLAY
- DO 595 I=1,NROW
- DO 595 J=1,NCOL
- IF(ICH(J,I,K).EQ.0) GO TO 595
- NZ=IZONE(J,I,K)
- IF(NZ.EQ.0) GO TO 595
- IF(K.EQ.NLAY) GO TO 580
- IF(ICH(J,I,K+1).EQ.1) GO TO 580
- DBUFF=BUFFD(J,I,K)
- IF(DBUFF.EQ.DZERO) THEN
- ELSE IF(DBUFF.LT.DZERO) THEN
- VBVL(2,MSUMCH,NZ)=VBVL(2,MSUMCH,NZ)-DBUFF
- ELSE
- VBVL(1,MSUMCH,NZ)=VBVL(1,MSUMCH,NZ)+DBUFF
- END IF
- 580 IF(K.EQ.1) GO TO 595
- IF(ICH(J,I,K-1).EQ.1) GO TO 595
- DBUFF=BUFFD(J,I,K-1)
- IF(DBUFF.EQ.DZERO) THEN
- ELSE IF(DBUFF.LT.DZERO) THEN
- VBVL(1,MSUMCH,NZ)=VBVL(1,MSUMCH,NZ)-DBUFF
- ELSE
- VBVL(2,MSUMCH,NZ)=VBVL(2,MSUMCH,NZ)+DBUFF
- END IF
- 595 CONTINUE
- RETURN
+
+ Parameters
+ ----------
+ recname
+ ich
+ kstpkper
+ totim
+
+ Returns
+ -------
+
"""
try:
if self.nlay >= 2:
@@ -1125,7 +1122,8 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
# "FLOW LOWER FACE"
# CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K-1
- k, i, j = np.where(self.izone[1:, :, :] < self.izone[:-1, :, :])
+ k, i, j = np.where(
+ self.izone[1:, :, :] < self.izone[:-1, :, :])
k += 1
ka = k - 1
nza = self.izone[ka, i, j]
@@ -1136,17 +1134,20 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
fzi, tzi, fi = sum_flux_tuples(nza[idx],
nz[idx],
q[idx])
- self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi),
+ kstpkper, totim)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1)))
fzi, tzi, fi = sum_flux_tuples(nz[idx],
nza[idx],
q[idx])
- self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi),
+ kstpkper, totim)
# CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K+1
- k, i, j = np.where(self.izone[:-1, :, :] < self.izone[1:, :, :])
+ k, i, j = np.where(
+ self.izone[:-1, :, :] < self.izone[1:, :, :])
nz = self.izone[k, i, j]
kb = k + 1
nzb = self.izone[kb, i, j]
@@ -1156,14 +1157,16 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
fzi, tzi, fi = sum_flux_tuples(nz[idx],
nzb[idx],
q[idx])
- self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi),
+ kstpkper, totim)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1)))
fzi, tzi, fi = sum_flux_tuples(nzb[idx],
nz[idx],
q[idx])
- self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi), kstpkper, totim)
+ self._update_budget_fromfaceflow(fzi, tzi, np.abs(fi),
+ kstpkper, totim)
# CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION
k, i, j = np.where(ich == 1)
@@ -1181,7 +1184,8 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
f = fi[tzi != 0]
fz = np.array(['TO_CONSTANT_HEAD'] * len(tz))
tz = np.array([self._zonenamedict[z] for z in tzi[tzi != 0]])
- self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper,
+ totim)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1)))
@@ -1192,7 +1196,8 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
f = fi[fzi != 0]
fz = np.array(['FROM_CONSTANT_HEAD'] * len(fz))
tz = np.array([self._zonenamedict[z] for z in tzi[tzi != 0]])
- self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper,
+ totim)
k, i, j = np.where(ich == 1)
k, i, j = k[k < self.nlay - 1], i[k < self.nlay - 1], j[
@@ -1210,7 +1215,8 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
f = fi[tzi != 0]
fz = np.array(['FROM_CONSTANT_HEAD'] * len(tz))
tz = np.array([self._zonenamedict[z] for z in tzi[tzi != 0]])
- self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper,
+ totim)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1)))
@@ -1221,7 +1227,8 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
f = fi[fzi != 0]
fz = np.array(['TO_CONSTANT_HEAD'] * len(fz))
tz = np.array([self._zonenamedict[z] for z in tzi[tzi != 0]])
- self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
+ self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper,
+ totim)
except Exception as e:
print(e)
@@ -1340,9 +1347,11 @@ def _compute_mass_balance(self, kstpkper, totim):
elif totim is not None:
rowidx = np.where((self._budget['totim'] == totim) &
np.in1d(self._budget['name'], innames))
- a = _numpyvoid2numeric(self._budget[list(self._zonenamedict.values())][rowidx])
+ a = _numpyvoid2numeric(
+ self._budget[list(self._zonenamedict.values())][rowidx])
intot = np.array(a.sum(axis=0))
- tz = np.array(list([n for n in self._budget.dtype.names if n not in skipcols]))
+ tz = np.array(
+ list([n for n in self._budget.dtype.names if n not in skipcols]))
fz = np.array(['TOTAL_IN'] * len(tz))
self._update_budget_fromssst(fz, tz, intot, kstpkper, totim)
@@ -1354,20 +1363,24 @@ def _compute_mass_balance(self, kstpkper, totim):
elif totim is not None:
rowidx = np.where((self._budget['totim'] == totim) &
np.in1d(self._budget['name'], outnames))
- a = _numpyvoid2numeric(self._budget[list(self._zonenamedict.values())][rowidx])
+ a = _numpyvoid2numeric(
+ self._budget[list(self._zonenamedict.values())][rowidx])
outot = np.array(a.sum(axis=0))
- tz = np.array(list([n for n in self._budget.dtype.names if n not in skipcols]))
+ tz = np.array(
+ list([n for n in self._budget.dtype.names if n not in skipcols]))
fz = np.array(['TOTAL_OUT'] * len(tz))
self._update_budget_fromssst(fz, tz, outot, kstpkper, totim)
# Compute IN-OUT
- tz = np.array(list([n for n in self._budget.dtype.names if n not in skipcols]))
+ tz = np.array(
+ list([n for n in self._budget.dtype.names if n not in skipcols]))
f = intot - outot
fz = np.array(['IN-OUT'] * len(tz))
self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
# Compute percent discrepancy
- tz = np.array(list([n for n in self._budget.dtype.names if n not in skipcols]))
+ tz = np.array(
+ list([n for n in self._budget.dtype.names if n not in skipcols]))
fz = np.array(['PERCENT_DISCREPANCY'] * len(tz))
in_minus_out = intot - outot
in_plus_out = intot + outot
@@ -1406,8 +1419,10 @@ def _compute_net_budget(self):
in_budget = self._budget[select_fields][select_records_in]
out_budget = self._budget[select_fields][select_records_out]
net_budget = in_budget.copy()
- for f in [n for n in self._zonenamedict.values() if n in select_fields]:
- net_budget[f] = np.array([r for r in in_budget[f]]) - np.array([r for r in out_budget[f]])
+ for f in [n for n in self._zonenamedict.values() if
+ n in select_fields]:
+ net_budget[f] = np.array([r for r in in_budget[f]]) - np.array(
+ [r for r in out_budget[f]])
newnames = ['_'.join(n.split('_')[1:]) for n in net_budget['name']]
net_budget['name'] = newnames
return net_budget
diff --git a/flopy/version.py b/flopy/version.py
index 9016af4c82..73a5c0d23c 100644
--- a/flopy/version.py
+++ b/flopy/version.py
@@ -1,11 +1,11 @@
# flopy version file automatically created using...pre-commit.py
-# created on...October 17, 2018 12:31:45
+# created on...October 17, 2018 18:17:12
major = 3
minor = 2
micro = 9
-build = 216
-commit = 2887
+build = 218
+commit = 2889
__version__ = '{:d}.{:d}.{:d}'.format(major, minor, micro)
__build__ = '{:d}.{:d}.{:d}.{:d}'.format(major, minor, micro, build)