Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Docs: fix many sphinx warnings #615

Merged
merged 2 commits into from
Jan 12, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion climada/engine/impact.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,7 @@ def __init__(self,
"""
Init Impact object

Attributes
Parameters
----------
event_id : np.array, optional
id (>0) of each hazard event
Expand Down
16 changes: 7 additions & 9 deletions climada/engine/unsequa/calc_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -298,17 +298,15 @@ def sensitivity(self, unc_output, sensitivity_method = 'sobol',

Parameters
----------
unc_output : climada.engine.uncertainty.unc_output.UncOutput()
unc_output : climada.engine.uncertainty.unc_output.UncOutput
Uncertainty data object in which to store the sensitivity indices
sensitivity_method : str
sensitivity analysis method from SALib.analyse
Possible choices:
'fast', 'rbd_fact', 'morris', 'sobol', 'delta', 'ff'
The default is 'sobol'.
Note that in Salib, sampling methods and sensitivity analysis
methods should be used in specific pairs.
sensitivity_method : str, optional
Sensitivity analysis method from SALib.analyse. Possible choices: 'fast', 'rbd_fast',
'morris', 'sobol', 'delta', 'ff'. Note that in Salib, sampling methods and sensitivity
analysis methods should be used in specific pairs:
https://salib.readthedocs.io/en/latest/api.html
sensitivity_kwargs: dict(), optional
Default: 'sobol'
sensitivity_kwargs: dict, optional
Keyword arguments of the chosen SALib analyse method.
The default is to use SALib's default arguments.

Expand Down
4 changes: 1 addition & 3 deletions climada/engine/unsequa/calc_cost_benefit.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,9 +63,7 @@ class CalcCostBenefit(Calc):
('haz_input_var', 'ent_input_var', 'haz_fut_input_var', 'ent_fut_input_var')
_metric_names : tuple(str)
Names of the cost benefit output metrics
('tot_climate_risk', 'benefit', 'cost_ben_ratio',
'imp_meas_present', 'imp_meas_future')

('tot_climate_risk', 'benefit', 'cost_ben_ratio', 'imp_meas_present', 'imp_meas_future')
"""

_input_var_names = (
Expand Down
77 changes: 40 additions & 37 deletions climada/engine/unsequa/input_var.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,39 +68,40 @@ class InputVar():
--------

Categorical variable function: LitPop exposures with m,n exponents in [0,5]
import scipy as sp
def litpop_cat(m, n):
exp = Litpop.from_countries('CHE', exponent=[m, n])
return exp
distr_dict = {
'm': sp.stats.randint(low=0, high=5),
'n': sp.stats.randint(low=0, high=5)
}
iv_cat = InputVar(func=litpop_cat, distr_dict=distr_dict)

>>> import scipy as sp
>>> def litpop_cat(m, n):
... exp = LitPop.from_countries('CHE', exponent=[m, n])
... return exp
>>> distr_dict = {
... 'm': sp.stats.randint(low=0, high=5),
... 'n': sp.stats.randint(low=0, high=5)
... }
>>> iv_cat = InputVar(func=litpop_cat, distr_dict=distr_dict)

Continuous variable function: Impact function for TC
import scipy as sp
def imp_fun_tc(G, v_half, vmin, k, _id=1):
intensity = np.linspace(0, 150, num=100)
mdd = np.repeat(1, len(intensity))
paa = np.array([sigmoid_function(v, G, v_half, vmin, k)
for v in intensity])
imp_fun = ImpactFunc(haz_type='TC',
id=_id,
intensity_unit='m/s',
intensity=intensity,
mdd=mdd,
paa=paa)
imp_fun.check()
impf_set = ImpactFuncSet([imp_fun])
return impf_set
distr_dict = {"G": sp.stats.uniform(0.8, 1),
"v_half": sp.stats.uniform(50, 100),
"vmin": sp.stats.norm(loc=15, scale=30),
"k": sp.stats.randint(low=1, high=9)
}
iv_cont = InputVar(func=imp_fun_tc, distr_dict=distr_dict)

>>> import scipy as sp
>>> def imp_fun_tc(G, v_half, vmin, k, _id=1):
... intensity = np.linspace(0, 150, num=100)
... mdd = np.repeat(1, len(intensity))
... paa = np.array([sigmoid_function(v, G, v_half, vmin, k)
... for v in intensity])
... imp_fun = ImpactFunc(haz_type='TC',
... id=_id,
... intensity_unit='m/s',
... intensity=intensity,
... mdd=mdd,
... paa=paa)
... imp_fun.check()
... impf_set = ImpactFuncSet([imp_fun])
... return impf_set
>>> distr_dict = {"G": sp.stats.uniform(0.8, 1),
... "v_half": sp.stats.uniform(50, 100),
... "vmin": sp.stats.norm(loc=15, scale=30),
... "k": sp.stats.randint(low=1, high=9)
... }
>>> iv_cont = InputVar(func=imp_fun_tc, distr_dict=distr_dict)
"""

def __init__(
Expand Down Expand Up @@ -163,8 +164,9 @@ def plot(self, figsize=None):
figsize: tuple(int or float, int or float), optional
The figsize argument of matplotlib.pyplot.subplots()
The default is derived from the total number of plots (nplots) as:
nrows, ncols = int(np.ceil(nplots / 3)), min(nplots, 3)
figsize = (ncols * FIG_W, nrows * FIG_H)

>>> nrows, ncols = int(np.ceil(nplots / 3)), min(nplots, 3)
>>> figsize = (ncols * FIG_W, nrows * FIG_H)

Returns
-------
Expand Down Expand Up @@ -233,6 +235,7 @@ def haz(haz_list, n_ev=None, bounds_int=None, bounds_frac=None, bounds_freq=None
Helper wrapper for basic hazard uncertainty input variable

The following types of uncertainties can be added:

HE: sub-sampling events from the total event set
For each sub-sample, n_ev events are sampled with replacement.
HE is the value of the seed
Expand Down Expand Up @@ -300,6 +303,7 @@ def exp(exp_list, bounds_totval=None, bounds_noise=None):
Helper wrapper for basic exposure uncertainty input variable

The following types of uncertainties can be added:

ET: scale the total value (homogeneously)
The value at each exposure point is multiplied by a number
sampled uniformly from a distribution with
Expand Down Expand Up @@ -350,13 +354,14 @@ def exp(exp_list, bounds_totval=None, bounds_noise=None):

@staticmethod
def impfset(impf_set_list, haz_id_dict= None, bounds_mdd=None, bounds_paa=None,
bounds_impfi=None):
bounds_impfi=None):
"""
Helper wrapper for basic impact function set uncertainty input variable.

One impact function (chosen with haz_type and fun_id) is characterized.

The following types of uncertainties can be added:

MDD: scale the mdd (homogeneously)
The value of mdd at each intensity is multiplied by a number
sampled uniformly from a distribution with
Expand Down Expand Up @@ -436,6 +441,7 @@ def ent(impf_set_list, disc_rate, exp_list, meas_set, haz_id_dict,
fun_id will be affected by bounds_impfi, bounds_mdd, bounds_paa.

The following types of uncertainties can be added:

DR: value of constant discount rate (homogeneously)
The value of the discounts in each year is
sampled uniformly from a distribution with
Expand Down Expand Up @@ -473,10 +479,8 @@ def ent(impf_set_list, disc_rate, exp_list, meas_set, haz_id_dict,
sampled. For example, impact functions obtained from different
calibration methods.


If a bounds is None, this parameter is assumed to have no uncertainty.


Parameters
----------
bounds_disk : (float, float), optional
Expand Down Expand Up @@ -570,6 +574,7 @@ def entfut(impf_set_list, exp_list, meas_set, haz_id_dict,
fun_id will be affected by bounds_impfi, bounds_mdd, bounds_paa.

The following types of uncertainties can be added:

CO: scale the cost (homogeneously)
The cost of all measures is multiplied by the same number
sampled uniformly from a distribution with
Expand Down Expand Up @@ -603,10 +608,8 @@ def entfut(impf_set_list, exp_list, meas_set, haz_id_dict,
sampled. For example, impact functions obtained from different
calibration methods.


If a bounds is None, this parameter is assumed to have no uncertainty.


Parameters
----------
bounds_cost : (float, float), optional
Expand Down
15 changes: 9 additions & 6 deletions climada/engine/unsequa/unc_output.py
Original file line number Diff line number Diff line change
Expand Up @@ -414,8 +414,9 @@ def plot_sample(self, figsize=None):
figsize : tuple(int or float, int or float), optional
The figsize argument of matplotlib.pyplot.subplots()
The default is derived from the total number of plots (nplots) as:
nrows, ncols = int(np.ceil(nplots / 3)), min(nplots, 3)
figsize = (ncols * FIG_W, nrows * FIG_H)

>>> nrows, ncols = int(np.ceil(nplots / 3)), min(nplots, 3)
>>> figsize = (ncols * FIG_W, nrows * FIG_H)

Raises
------
Expand Down Expand Up @@ -730,8 +731,9 @@ def plot_sensitivity(self, salib_si='S1', salib_si_conf='S1_conf',
figsize : tuple(int or float, int or float), optional
The figsize argument of matplotlib.pyplot.subplots()
The default is derived from the total number of plots (nplots) as:
nrows, ncols = int(np.ceil(nplots / 3)), min(nplots, 3)
figsize = (ncols * FIG_W, nrows * FIG_H)

>>> nrows, ncols = int(np.ceil(nplots / 3)), min(nplots, 3)
>>> figsize = (ncols * FIG_W, nrows * FIG_H)
axes : matplotlib.pyplot.axes, optional
Axes handles to use for the plot. The default is None.
kwargs :
Expand Down Expand Up @@ -828,8 +830,9 @@ def plot_sensitivity_second_order(self, salib_si='S2', salib_si_conf='S2_conf',
figsize : tuple(int or float, int or float), optional
The figsize argument of matplotlib.pyplot.subplots()
The default is derived from the total number of plots (nplots) as:
nrows, ncols = int(np.ceil(nplots / 3)), min(nplots, 3)
figsize = (ncols * 5, nrows * 5)

>>> nrows, ncols = int(np.ceil(nplots / 3)), min(nplots, 3)
>>> figsize = (ncols * 5, nrows * 5)
axes : matplotlib.pyplot.axes, optional
Axes handles to use for the plot. The default is None.
kwargs :
Expand Down
35 changes: 27 additions & 8 deletions climada/entity/disc_rates/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ class DiscRates():

Attributes
----------
tag: Tag
tag: climada.entity.tag.Tag
information about the source data
years: np.array
list of years
Expand Down Expand Up @@ -235,9 +235,16 @@ def from_mat(cls, file_name, description='', var_names=None):
description: str, optional
description of the data. The default is ''
var_names: dict, optional
name of the variables in the file. The Default is
DEF_VAR_MAT = {'sup_field_name': 'entity', 'field_name': 'discount',
'var_name': {'year': 'year', 'disc': 'discount_rate'}}
name of the variables in the file. Default:

>>> DEF_VAR_MAT = {
... 'sup_field_name': 'entity',
... 'field_name': 'discount',
... 'var_name': {
... 'year': 'year',
... 'disc': 'discount_rate',
... }
... }

Returns
-------
Expand Down Expand Up @@ -282,8 +289,14 @@ def from_excel(cls, file_name, description='', var_names=None):
description of the data. The default is ''
var_names: dict, optional
name of the variables in the file. The default is
DEF_VAR_EXCEL = {'sheet_name': 'discount',
'col_name': {'year': 'year', 'disc': 'discount_rate'}}

>>> DEF_VAR_EXCEL = {
... 'sheet_name': 'discount',
... 'col_name': {
... 'year': 'year',
... 'disc': 'discount_rate',
... }
... }

Returns
-------
Expand Down Expand Up @@ -320,8 +333,14 @@ def write_excel(self, file_name, var_names=None):
filename including path and extension
var_names: dict, optional
name of the variables in the file. The default is
DEF_VAR_EXCEL = {'sheet_name': 'discount',
'col_name': {'year': 'year', 'disc': 'discount_rate'}}

>>> DEF_VAR_EXCEL = {
... 'sheet_name': 'discount',
... 'col_name': {
... 'year': 'year',
... 'disc': 'discount_rate',
... }
... }
"""
if var_names is None:
var_names = DEF_VAR_EXCEL
Expand Down
41 changes: 22 additions & 19 deletions climada/entity/exposures/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ class Exposures():

Attributes
----------
tag : Tag
tag : climada.entity.tag.Tag
metadata - information about the source data
ref_year : int
metadata - reference year
Expand All @@ -97,10 +97,10 @@ class Exposures():
CRS information inherent to GeoDataFrame.
value : pd.Series
a value for each exposure
impf_ : pd.Series, optional
impf_SUFFIX : pd.Series, optional
e.g. impf_TC. impact functions id for hazard TC.
There might be different hazards defined: impf_TC, impf_FL, ...
If not provided, set to default 'impf_' with ids 1 in check().
If not provided, set to default ``impf_`` with ids 1 in check().
geometry : pd.Series, optional
geometry of type Point of each instance.
Computed in method set_geometry_points().
Expand All @@ -117,7 +117,7 @@ class Exposures():
category id for each exposure
region_id : pd.Series, optional
region id for each exposure
centr_ : pd.Series, optional
centr_SUFFIX : pd.Series, optional
e.g. centr_TC. centroids index for hazard
TC. There might be different hazards defined: centr_TC, centr_FL, ...
Computed in method assign_centroids().
Expand Down Expand Up @@ -229,8 +229,8 @@ def check(self):
"""Check Exposures consistency.

Reports missing columns in log messages.
If no impf_* column is present in the dataframe, a default column 'impf_' is added with
default impact function id 1.
If no ``impf_*`` column is present in the dataframe, a default column ``impf_`` is added
with default impact function id 1.
"""
# mandatory columns
for var in self.vars_oblig:
Expand Down Expand Up @@ -338,10 +338,11 @@ def get_impf_column(self, haz_type=''):
-------
str
a column name, the first of the following that is present in the exposures' dataframe:
- impf_[haz_type]
- if_[haz_type]
- impf_
- if_

- ``impf_[haz_type]``
- ``if_[haz_type]``
- ``impf_``
- ``if_``

Raises
------
Expand Down Expand Up @@ -370,8 +371,9 @@ def assign_centroids(self, hazard, distance='euclidean',
threshold=u_coord.NEAREST_NEIGHBOR_THRESHOLD,
overwrite=True):
"""Assign for each exposure coordinate closest hazard coordinate.
-1 used for disatances > threshold in point distances. If raster hazard,
-1 used for centroids outside raster.

The value -1 is used for distances larger than ``threshold`` in point distances.
In case of raster hazards the value -1 is used for centroids outside of the raster.

Parameters
----------
Expand All @@ -392,23 +394,24 @@ def assign_centroids(self, hazard, distance='euclidean',

See Also
--------
climada.util.coordinates.assign_coordinates: method to associate centroids to
exposure points
climada.util.coordinates.assign_coordinates
method to associate centroids to exposure points

Notes
-----
The default order of use is:
1. if centroid raster is defined, assign exposures points to
the closest raster point.
2. if no raster, assign centroids to the nearest neighbor using
euclidian metric

1. if centroid raster is defined, assign exposures points to
the closest raster point.
2. if no raster, assign centroids to the nearest neighbor using
euclidean metric

Both cases can introduce inaccuracies for coordinates in lat/lon
coordinates as distances in degrees differ from distances in meters
on the Earth surface, in particular for higher latitude and distances
larger than 100km. If more accuracy is needed, please use 'haversine'
distance metric. This however is slower for (quasi-)gridded data,
and works only for non-gridded data.

"""
haz_type = hazard.tag.haz_type
centr_haz = INDICATOR_CENTR + haz_type
Expand Down
Loading