From a0ddbede089ce5fee1bc152640496c66f5949e9c Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sat, 7 May 2022 16:03:17 +0200
Subject: [PATCH 01/59] Added sobol sensitivity
---
src/UQpy/sensitivity/baseclass/__init__.py | 0
src/UQpy/sensitivity/baseclass/pickfreeze.py | 64 ++
src/UQpy/sensitivity/baseclass/sensitivity.py | 318 ++++++
src/UQpy/sensitivity/sobol.py | 933 ++++++++++++++++++
4 files changed, 1315 insertions(+)
create mode 100644 src/UQpy/sensitivity/baseclass/__init__.py
create mode 100644 src/UQpy/sensitivity/baseclass/pickfreeze.py
create mode 100644 src/UQpy/sensitivity/baseclass/sensitivity.py
create mode 100644 src/UQpy/sensitivity/sobol.py
diff --git a/src/UQpy/sensitivity/baseclass/__init__.py b/src/UQpy/sensitivity/baseclass/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/UQpy/sensitivity/baseclass/pickfreeze.py b/src/UQpy/sensitivity/baseclass/pickfreeze.py
new file mode 100644
index 000000000..4e9e2f57e
--- /dev/null
+++ b/src/UQpy/sensitivity/baseclass/pickfreeze.py
@@ -0,0 +1,64 @@
+import copy
+
+
+def generate_pick_freeze_samples(dist_obj, n_samples, random_state=None):
+
+ """
+ Generate samples to be used in the Pick-and-Freeze algorithm.
+
+ **Outputs:**
+
+ * **A_samples** (`ndarray`):
+ Sample set A.
+ Shape: `(n_samples, num_vars)`.
+
+ * **B_samples** (`ndarray`):
+ Sample set B.
+ Shape: `(n_samples, num_vars)`.
+
+ * **C_i_generator** (`generator`):
+ Generator for the sample set C_i.
+ Generator is used so that samples
+ do not have to be stored in memory.
+ C_i is a 2D array with all columns
+ from B_samples, except column `i`,
+ which is from A_samples.
+ Shape: `(n_samples, num_vars)`.
+
+ * **D_i_generator** (`generator`):
+ Generator for the sample set D_i.
+ Generator is used so that samples
+ do not have to be stored in memory.
+ D_i is a 2D array with all columns
+ from A_samples, except column `i`,
+ which is from B_samples.
+ Shape: `(n_samples, num_vars)`.
+
+ """
+
+ # Generate samples for A and B
+ samples = dist_obj.rvs(n_samples * 2, random_state=random_state)
+
+ num_vars = samples.shape[1]
+
+ # Split samples into two sets A and B
+ A_samples = samples[:n_samples, :]
+ B_samples = samples[n_samples:, :]
+
+ # Iterator for generating C_i
+ def C_i_generator():
+ """Generate C_i for each i."""
+ for i in range(num_vars):
+ C_i = copy.deepcopy(B_samples) #! Deepcopy so B is unchanged
+ C_i[:, i] = A_samples[:, i]
+ yield C_i
+
+ # Iterator for generating D_i
+ def D_i_generator():
+ """Generate D_i for each i."""
+ for i in range(num_vars):
+ D_i = copy.deepcopy(A_samples) #! Deepcopy so A is unchanged
+ D_i[:, i] = B_samples[:, i]
+ yield D_i
+
+ return A_samples, B_samples, C_i_generator(), D_i_generator()
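Editor's sketch (illustrative, not part of the patch): a minimal way to exercise the helper above, assuming the same Ishigami-style input distribution used in the unit tests added later in this series. The sample size and seed are arbitrary choices.

    import numpy as np

    from UQpy.distributions import Uniform
    from UQpy.distributions.collection import JointIndependent
    from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples

    # Three independent inputs on [-pi, pi] (loc=-pi, scale=2*pi), as in the Ishigami tests.
    dist = JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3)

    A, B, C_gen, D_gen = generate_pick_freeze_samples(
        dist, 4, random_state=np.random.RandomState(123)
    )

    print(A.shape, B.shape)  # (4, 3) (4, 3)

    # Each C_i equals B with column i replaced by column i of A (and vice versa for D_i).
    for i, C_i in enumerate(C_gen):
        assert np.allclose(C_i[:, i], A[:, i])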
diff --git a/src/UQpy/sensitivity/baseclass/sensitivity.py b/src/UQpy/sensitivity/baseclass/sensitivity.py
new file mode 100644
index 000000000..af2adc594
--- /dev/null
+++ b/src/UQpy/sensitivity/baseclass/sensitivity.py
@@ -0,0 +1,318 @@
+"""
+
+This module contains the abstract Sensitivity class used by other
+sensitivity classes:
+1. Chatterjee indices
+2. Cramer-von Mises indices
+3. Generalised Sobol indices
+4. Sobol indices
+
+"""
+
+import copy
+import numpy as np
+import scipy.stats
+
+from UQpy.run_model import RunModel
+from UQpy.distributions.baseclass import DistributionContinuous1D
+from UQpy.distributions.collection import JointIndependent
+
+
+class Sensitivity:
+ def __init__(
+ self, runmodel_object, dist_object, random_state=None, **kwargs
+ ) -> None:
+
+ # Check RunModel object
+ if not isinstance(runmodel_object, RunModel):
+ raise TypeError("UQpy: runmodel_object must be an object of class RunModel")
+
+ self.runmodel_object = runmodel_object
+
+ # Check distributions
+ if isinstance(dist_object, list):
+ for i in range(len(dist_object)):
+ if not isinstance(dist_object[i], (DistributionContinuous1D, JointIndependent)):
+ raise TypeError(
+ "UQpy: A ``DistributionContinuous1D`` or ``JointInd`` object "
+ "must be provided."
+ )
+ else:
+ if not isinstance(dist_object, (DistributionContinuous1D, JointIndependent)):
+ raise TypeError(
+ "UQpy: A ``DistributionContinuous1D`` or ``JointInd`` object must be provided."
+ )
+
+ self.dist_object = dist_object
+
+ # Check random state
+ self.random_state = random_state
+ if isinstance(self.random_state, int):
+ self.random_state = np.random.RandomState(self.random_state)
+ elif not (
+ self.random_state is None
+ or isinstance(self.random_state, np.random.RandomState)
+ ):
+ raise TypeError(
+ "UQpy: random state should be None, an integer or np.random.RandomState object"
+ )
+
+ # wrapper created for convenience to generate model evaluations
+ def _run_model(self, samples):
+ """Generate model evaluations for a set of samples.
+
+ **Inputs**:
+
+ * **samples** (`numpy.ndarray`):
+ A set of samples.
+ Shape: `(n_samples, num_vars)`
+
+ **Outputs**:
+
+ * **model_evaluations** (`numpy.ndarray`):
+ A set of model evaluations.
+ Shape: `(n_samples,)`
+
+ if multioutput: `(n_samples, n_outputs)`
+
+ """
+
+ self.runmodel_object.run(samples=samples, append_samples=False)
+ model_evals = copy.deepcopy(np.array(self.runmodel_object.qoi_list))
+
+ return model_evals
+
+ @staticmethod
+ def bootstrap_sample_generator_1D(samples):
+ """Generate bootstrap samples.
+
+ Generators are used to avoid copying the entire array.
+
+ It will simply pick `N` random rows from the array.
+
+ For example:
+ Model evaluations for the samples in A in the pick and freeze estimator.
+
+ **Inputs:**
+
+ * **samples** (`ndarray`):
+ Model evaluations for the samples.
+ Shape: `(n_samples, 1)`.
+
+ **Outputs:**
+
+ * `generator`:
+ Generator for the bootstrap samples.
+
+ """
+ n_samples = samples.shape[0]
+
+ while True:
+ _indices = np.random.randint(0, high=n_samples, size=n_samples)
+
+ yield samples[_indices]
+
+ @staticmethod
+ def bootstrap_sample_generator_2D(samples):
+ """Generate bootstrap samples.
+
+ Generators are used to avoid copying the entire array.
+
+ For example:
+ Let's say we have 3 random variables.
+ To pick bootstrap samples from f_C_i, we first
+ generate indices to pick values from each column:
+ num_cols = 3
+ cols = [0, 1, 2]
+ _indices = [[3, 4, 8]
+ [6, 1, 2]
+ [0, 5, 7]
+ [4, 1, 0]] (4x3)
+ elements from f_C_i will be picked column-wise:
+ f_C_i[_indices[:, 0], 0]
+ f_C_i[_indices[:, 1], 1] etc.
+
+ **Inputs:**
+
+ * **samples** (`ndarray`):
+ Model evaluations for the samples.
+ Shape: `(n_samples, num_vars)`.
+
+ **Outputs:**
+
+ * `generator`:
+ Generator for the bootstrap samples.
+
+ """
+ n_samples = samples.shape[0]
+
+ num_cols = samples.shape[1]
+ cols = np.arange(num_cols)
+
+ while True:
+ # generate indices to pick N values from f_A, f_B and f_C_i
+ _indices = np.random.randint(0, high=n_samples, size=samples.shape)
+
+ yield samples[_indices, cols]
+
+ @staticmethod
+ def bootstrap_sample_generator_3D(samples):
+ """Generate bootstrap samples.
+
+ Generators are used to avoid copying the entire array.
+
+ For example:
+ Let's say we have a model with multiple outputs.
+ We use the same approach as in the 2D
+ case for each slice of the 3D array.
+ Here, slices refer to the 'depth' of the array,
+ given by array.shape[0].
+
+ **Inputs:**
+
+ * **samples** (`ndarray`):
+ Model evaluations for the samples.
+ Shape: `(n_outputs, n_samples, num_vars)`.
+
+ **Outputs:**
+
+ * `generator`:
+ Generator for the bootstrap samples.
+
+ """
+ n_samples = samples.shape[1]
+ array_shape = samples.shape[1:]
+ num_cols = samples.shape[2]
+ cols = np.arange(num_cols)
+
+ while True:
+ _indices = np.random.randint(0, high=n_samples, size=array_shape)
+
+ yield samples[:, _indices, cols]
+
+ def bootstrapping(
+ self,
+ estimator,
+ estimator_inputs,
+ qoi_mean,
+ num_bootstrap_samples,
+ confidence_level=0.95,
+ **kwargs,
+ ):
+
+ """An abstract method to implement bootstrapping.
+
+ **Inputs:**
+
+ * **estimator** (`function`):
+ A method/function which computes the statistical
+ quantities of interest (QoI).
+ Example: `compute_first_order`
+ It must be a method/function that takes several `ndarray`s
+ of samples as input and returns a single `ndarray` of estimated values.
+
+ * **estimator_inputs** (`list`):
+ Inputs to the estimator concatenated in a list.
+
+ * **qoi_mean** (`ndarray`):
+ Mean of the QoI.
+ This is the value around which we
+ will compute the confidence interval.
+ Shape: `(n_qois, n_outputs)`.
+
+ * **num_bootstrap_samples** (`int`):
+ Number of bootstrap samples to generate.
+
+ * **confidence_level** (`float`):
+ Confidence level for the confidence interval.
+ Default: 0.95
+
+ **Outputs:**
+
+ * **confidence_interval_qoi** (`ndarray`):
+ Confidence interval for the quantity of interest (QoI).
+
+ """
+
+ n_qois = qoi_mean.shape[0]
+ n_outputs = qoi_mean.shape[1]
+
+ ##################### STORAGE #####################
+
+ # store generators of the inputs for bootstrap sampling
+ input_generators = []
+
+ # store the qoi computed using bootstrap samples
+ bootstrapped_qoi = np.zeros((n_outputs, n_qois, num_bootstrap_samples))
+
+ # store the confidence interval for each qoi
+ confidence_interval_qoi = np.zeros((n_outputs, n_qois, 2))
+
+ ##################### CREATE GENERATORS #####################
+
+ for i, input in enumerate(estimator_inputs):
+
+ if isinstance(input, np.ndarray):
+
+ # Example: f_A or f_B of models with single output.
+ # Shape: `(n_samples, 1)`.
+ if input.ndim == 2 and input.shape[1] == 1:
+ input_generators.append(self.bootstrap_sample_generator_1D(input))
+
+ # Example: f_C_i or f_D_i of models with single output.
+ # Shape: `(n_samples, num_vars)`.
+ elif input.ndim == 2 and input.shape[1] > 1:
+ input_generators.append(self.bootstrap_sample_generator_2D(input))
+
+ # Example: f_C_i or f_D_i of models with multiple outputs.
+ # Shape: `(n_outputs, n_samples, num_vars)`.
+ elif input.ndim == 3:
+ input_generators.append(self.bootstrap_sample_generator_3D(input))
+
+ # Example: if the model evaluations are None.
+ elif input is None:
+ input_generators.append(input)
+
+ else:
+ raise ValueError(
+ f"UQpy: estimator_inputs[{i}] should be either None or `ndarray` of dimension 1, 2 or 3"
+ )
+
+ ################### BOOTSTRAPPING ##################
+
+ # Compute the qoi for each bootstrap sample
+ for j in range(num_bootstrap_samples):
+
+ # inputs to the estimator
+ args = []
+
+ # generate samples
+ for gen_input in input_generators:
+ if gen_input is None:
+ args.append(gen_input)
+ else:
+ args.append(next(gen_input))
+
+ bootstrapped_qoi[:, :, j] = estimator(*args, **kwargs).T
+
+ ################# CONFIDENCE INTERVAL ################
+
+ # Calculate confidence intervals
+ delta = -scipy.stats.norm.ppf((1 - confidence_level) / 2)
+
+ for output_j in range(n_outputs):
+
+ # estimate the standard deviation using the bootstrap indices
+ std_qoi = np.std(bootstrapped_qoi[output_j, :, :], axis=1, ddof=1)
+
+ lower_bound = qoi_mean[:, output_j] - delta * std_qoi
+ upper_bound = qoi_mean[:, output_j] + delta * std_qoi
+
+ confidence_interval_qoi[output_j, :, 0] = lower_bound
+ confidence_interval_qoi[output_j, :, 1] = upper_bound
+
+ # For models with single output, return 2D array.
+ if n_outputs == 1:
+ confidence_interval_qoi = confidence_interval_qoi[0, :, :]
+
+ return confidence_interval_qoi
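Editor's sketch (not part of the patch): the column-wise indexing described in the `bootstrap_sample_generator_2D` docstring can be reproduced with a few lines of standalone NumPy. The 5x2 array and the seed mirror the `random_f_C_i` fixture used in the unit tests of the next patch.

    import numpy as np

    np.random.seed(12345)  # seed chosen for reproducibility, as in the unit tests

    # A C_i-like array of model evaluations: 5 samples x 2 variables.
    f_C_i = np.array([[100, 200], [101, 201], [102, 202], [103, 203], [104, 204]])

    n_samples, num_cols = f_C_i.shape
    cols = np.arange(num_cols)

    # Draw a (5, 2) matrix of row indices, one independent column of indices per variable,
    # then pick elements column-wise: resample[r, c] = f_C_i[_indices[r, c], c].
    _indices = np.random.randint(0, high=n_samples, size=f_C_i.shape)
    resample = f_C_i[_indices, cols]

    print(resample)  # each column is a with-replacement resample of the matching column of f_C_i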
diff --git a/src/UQpy/sensitivity/sobol.py b/src/UQpy/sensitivity/sobol.py
new file mode 100644
index 000000000..d4fb1de56
--- /dev/null
+++ b/src/UQpy/sensitivity/sobol.py
@@ -0,0 +1,933 @@
+"""
+
+The Sobol class computes the Sobol indices for single output and multi-output
+models. The Sobol indices can be computed using various pick-and-freeze
+schemes.
+
+The schemes implemented are listed below:
+
+# First order indices:
+- Sobol1993 [1]: Requires n_samples*(num_vars + 1) model evaluations
+- Saltelli2002 [3]: Requires n_samples*(2*num_vars + 1) model evaluations
+- Janon2014 [4]: Requires n_samples*(num_vars + 1) model evaluations
+
+# Second order indices:
+- Saltelli2002 [3]: Requires n_samples*(2*num_vars + 1) model evaluations
+
+# Total order indices:
+- Homma1996: Requires n_samples*(num_vars + 1) model evaluations
+- Saltelli2002 [3]: Requires n_samples*(2*num_vars + 1) model evaluations
+
+For more details on "Saltelli2002" refer to [3].
+
+Note: Apart from second order indices, the Saltelli2002 scheme provides
+ more accurate estimates of all indices than Homma1996 or Sobol1993,
+ because it makes more efficient use of the larger number of model evaluations.
+
+Additionally, we can compute the confidence intervals for the Sobol indices
+using bootstrapping [2].
+
+
+References
+----------
+
+.. [1] Sobol, I.M. (1993) Sensitivity Estimates for Nonlinear Mathematical Models.
+ Mathematical Modelling and Computational Experiments, 4, 407-414.
+
+.. [2] Jeremy Orloff and Jonathan Bloom (2014), Bootstrap confidence intervals,
+ Introduction to Probability and Statistics, MIT OCW.
+
+.. [3] Saltelli, A. (2002). Making best use of model evaluations to
+ compute sensitivity indices.
+
+.. [4] Janon, Alexandre; Klein, Thierry; Lagnoux, Agnès; Nodet, Maëlle;
+ Prior, Clémentine. Asymptotic normality and efficiency of two Sobol index
+ estimators. ESAIM: Probability and Statistics, Volume 18 (2014), pp. 342-364.
+ doi:10.1051/ps/2013040. http://www.numdam.org/articles/10.1051/ps/2013040/
+
+"""
+
+import math
+import logging
+import itertools
+
+import numpy as np
+
+from UQpy.sensitivity.baseclass.sensitivity import Sensitivity
+from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples
+from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter
+
+# TODO: Sampling strategies
+
+
+class Sobol(Sensitivity):
+ """
+ Compute Sobol sensitivity indices using the pick
+ and freeze algorithm. For models with multiple outputs
+ (vector-valued response), the sensitivity indices are computed for each
+ output separately.
+ For time-series models, the sensitivity indices are computed for each
+ time instant separately (pointwise-in-time Sobol indices).
+
+ **Inputs:**
+
+ * **runmodel_object** (``RunModel`` object):
+ The computational model. It should be of type
+ ``RunModel`` (see ``RunModel`` class).
+ The output QoI can be a scalar or a vector of
+ length `ny`, in which case the sensitivity indices of
+ all `ny` outputs are computed independently.
+
+ * **dist_object** ((list of) ``Distribution`` object(s)):
+ List of ``Distribution`` objects corresponding
+ to each random variable, or ``JointInd`` object
+ (multivariate RV with independent marginals).
+
+ * **random_state** (None or `int` or ``numpy.random.RandomState`` object):
+ Random seed used to initialize the
+ pseudo-random number generator.
+ Default is None.
+
+ **Attributes:**
+
+ * **sobol_i** (`ndarray`):
+ First order sensitivity indices.
+ Shape: `(num_vars, n_outputs)`
+
+ * **sobol_total_i** (`ndarray`):
+ Total order sensitivity indices.
+ Shape: `(num_vars, n_outputs)`
+
+ * **sobol_ij** (`ndarray`):
+ Second order sensitivity indices.
+ Shape: `(num_second_order_terms, n_outputs)`
+
+ * **CI_sobol_i** (`ndarray`):
+ Confidence intervals for the first order sensitivity indices.
+ Shape: `(num_vars, 2)`
+
+ if multioutput: Shape: `(n_outputs, num_vars, 2)`
+
+ * **CI_sobol_total_i** (`ndarray`):
+ Confidence intervals for the total order sensitivity indices.
+ Shape: `(num_vars, 2)`
+
+ if multioutput: Shape: `(n_outputs, num_vars, 2)`
+
+ * **CI_sobol_ij** (`ndarray`):
+ Confidence intervals for the second order Sobol indices.
+ Shape: `(num_second_order_terms, 2)`
+
+ if multioutput: Shape: `(n_outputs, num_second_order_terms, 2)`
+
+ * **n_samples** (`int`):
+ Number of samples used to compute the sensitivity indices.
+
+ * **num_vars** (`int`):
+ Number of model input variables.
+
+ * **multioutput** (`bool`):
+ True if the model has multiple outputs.
+
+ **Methods:**
+ """
+
+ def __init__(
+ self, runmodel_object, dist_object, random_state=None, **kwargs
+ ) -> None:
+
+ super().__init__(runmodel_object, dist_object, random_state, **kwargs)
+
+ # Create logger with the same name as the class
+ self.logger = logging.getLogger(__name__)
+ self.logger.setLevel(logging.ERROR)
+ frmt = UQpyLoggingFormatter()
+
+ # create console handler with a higher log level
+ ch = logging.StreamHandler()
+ ch.setFormatter(frmt)
+
+ # add the handler to the logger
+ self.logger.addHandler(ch)
+
+ def run(
+ self,
+ n_samples=1_000,
+ num_bootstrap_samples=None,
+ confidence_level=0.95,
+ estimate_second_order=False,
+ first_order_scheme="Janon2014",
+ total_order_scheme="Homma1996",
+ second_order_scheme="Saltelli2002",
+ ):
+
+ """
+ Compute the sensitivity indices and confidence intervals.
+
+ **Inputs:**
+
+ * **n_samples** (`int`):
+ Number of samples used to compute the sensitivity indices.
+ Default is 1,000.
+
+ * **num_bootstrap_samples** (`int`):
+ Number of bootstrap samples used to compute
+ the confidence intervals.
+ Default is None.
+
+ * **confidence_level** (`float`):
+ Confidence level used to compute the confidence intervals.
+ Default is 0.95.
+
+ * **estimate_second_order** (`bool`):
+ If True, compute the second order sensitivity indices.
+ Default is False.
+
+ * **first_order_scheme** (`str`):
+ Scheme used to compute the first order Sobol indices.
+ Default is "Sobol1993".
+
+ * **total_order_scheme** (`str`):
+ Scheme used to compute the total order Sobol indices.
+ Default is "Homma1996".
+
+ * **second_order_scheme** (`str`):
+ Scheme used to compute the second order Sobol indices.
+ Default is "Saltelli2002".
+
+ **Outputs:**
+
+ * **computed_indices** (`dict`):
+ Dictionary containing the computed sensitivity indices.
+
+ * **sobol_i** (`ndarray`):
+ First order Sobol indices.
+ Shape: `(num_vars, n_outputs)`
+
+ * **sobol_total_i** (`ndarray`):
+ Total order Sobol indices.
+ Shape: `(num_vars, n_outputs)`
+
+ * **sobol_ij** (`ndarray`):
+ Second order Sobol indices.
+ Shape: `(num_second_order_terms, n_outputs)`
+
+ * **CI_sobol_i** (`ndarray`):
+ Confidence intervals for the first order Sobol indices.
+ Shape: `(num_vars, 2)`
+
+ if multioutput: Shape: `(n_outputs, num_vars, 2)`
+
+ * **CI_sobol_total_i** (`ndarray`):
+ Confidence intervals for the total order Sobol indices.
+ Shape: `(num_vars, 2)`
+
+ if multioutput: Shape: `(n_outputs, num_vars, 2)`
+
+ * **CI_sobol_ij** (`ndarray`):
+ Confidence intervals for the second order Sobol indices.
+ Shape: `(num_second_order_terms, 2)`
+
+ if multioutput: Shape: `(n_outputs, num_second_order_terms, 2)`
+
+ """
+ # Check n_samples data type
+ self.n_samples = n_samples
+ if not isinstance(self.n_samples, int):
+ raise TypeError("UQpy: n_samples should be an integer.")
+
+ # Check num_bootstrap_samples data type
+ if num_bootstrap_samples is not None:
+ if not isinstance(num_bootstrap_samples, int):
+ raise TypeError("UQpy: num_bootstrap_samples should be an integer.")
+ elif num_bootstrap_samples is None:
+ self.logger.info(
+ "UQpy: num_bootstrap_samples is set to None, confidence intervals will not be computed."
+ )
+
+ ################## GENERATE SAMPLES ##################
+
+ (
+ A_samples,
+ B_samples,
+ C_i_generator,
+ D_i_generator,
+ ) = generate_pick_freeze_samples(
+ self.dist_object, self.n_samples, self.random_state
+ )
+
+ self.logger.info("UQpy: Generated samples using the pick-freeze scheme.")
+
+ self.num_vars = A_samples.shape[1] # Number of variables
+
+ ################# MODEL EVALUATIONS ####################
+
+ A_model_evals = self._run_model(A_samples) # shape: (n_samples, n_outputs)
+
+ self.logger.info("UQpy: Model evaluations A completed.")
+
+ B_model_evals = self._run_model(B_samples) # shape: (n_samples, n_outputs)
+
+ self.logger.info("UQpy: Model evaluations B completed.")
+
+ # Check the number of outputs of the model
+ try:
+ self.n_outputs = A_model_evals.shape[1]
+ except IndexError:
+ self.n_outputs = 1
+
+ # multioutput flag
+ self.multioutput = self.n_outputs > 1
+
+ if not self.multioutput:
+ A_model_evals = A_model_evals.reshape(-1, 1)
+ B_model_evals = B_model_evals.reshape(-1, 1)
+
+ C_i_model_evals = np.zeros((self.n_outputs, self.n_samples, self.num_vars))
+
+ for i, C_i in enumerate(C_i_generator):
+ C_i_model_evals[:, :, i] = self._run_model(C_i).T
+
+ self.logger.info("UQpy: Model evaluations C completed.")
+
+ # Compute D_i_model_evals only if needed
+ if estimate_second_order or total_order_scheme == "Saltelli2002":
+
+ D_i_model_evals = np.zeros((self.n_outputs, self.n_samples, self.num_vars))
+
+ for i, D_i in enumerate(D_i_generator):
+ D_i_model_evals[:, :, i] = self._run_model(D_i).T
+
+ self.logger.info("UQpy: Model evaluations D completed.")
+
+ else:
+ D_i_model_evals = None
+
+ self.logger.info("UQpy: All model evaluations computed successfully.")
+
+ ######################### STORAGE ########################
+
+ # Create dictionary to store the sensitivity indices
+ computed_indices = {}
+
+ ################## COMPUTE SOBOL INDICES ##################
+
+ # First order Sobol indices
+ self.sobol_i = compute_first_order(
+ A_model_evals,
+ B_model_evals,
+ C_i_model_evals,
+ D_i_model_evals,
+ scheme=first_order_scheme,
+ )
+
+ self.logger.info("UQpy: First order Sobol indices computed successfully.")
+
+ computed_indices["sobol_i"] = self.sobol_i
+
+ # Total order Sobol indices
+ self.sobol_total_i = compute_total_order(
+ A_model_evals,
+ B_model_evals,
+ C_i_model_evals,
+ D_i_model_evals,
+ scheme=total_order_scheme,
+ )
+
+ self.logger.info("UQpy: Total order Sobol indices computed successfully.")
+
+ computed_indices["sobol_total_i"] = self.sobol_total_i
+
+ if estimate_second_order:
+
+ # Second order Sobol indices
+ self.sobol_ij = compute_second_order(
+ A_model_evals,
+ B_model_evals,
+ C_i_model_evals,
+ D_i_model_evals,
+ computed_indices["sobol_i"],
+ scheme=second_order_scheme,
+ )
+
+ self.logger.info("UQpy: Second order Sobol indices computed successfully.")
+
+ computed_indices["sobol_ij"] = self.sobol_ij
+
+ ################## CONFIDENCE INTERVALS ####################
+
+ if num_bootstrap_samples is not None:
+
+ self.logger.info("UQpy: Computing confidence intervals ...")
+
+ estimator_inputs = [
+ A_model_evals,
+ B_model_evals,
+ C_i_model_evals,
+ D_i_model_evals,
+ ]
+
+ # First order Sobol indices
+ self.CI_sobol_i = self.bootstrapping(
+ compute_first_order,
+ estimator_inputs,
+ computed_indices["sobol_i"],
+ num_bootstrap_samples,
+ confidence_level,
+ scheme=first_order_scheme,
+ )
+
+ self.logger.info(
+ "UQpy: Confidence intervals for First order Sobol indices computed successfully."
+ )
+
+ computed_indices["CI_sobol_i"] = self.CI_sobol_i
+
+ # Total order Sobol indices
+ self.CI_sobol_total_i = self.bootstrapping(
+ compute_total_order,
+ estimator_inputs,
+ computed_indices["sobol_total_i"],
+ num_bootstrap_samples,
+ confidence_level,
+ scheme=total_order_scheme,
+ )
+
+ self.logger.info(
+ "UQpy: Confidence intervals for Total order Sobol indices computed successfully."
+ )
+
+ computed_indices["CI_sobol_total_i"] = self.CI_sobol_total_i
+
+ # Second order Sobol indices
+ if estimate_second_order:
+ self.CI_sobol_ij = self.bootstrapping(
+ compute_second_order,
+ estimator_inputs,
+ computed_indices["sobol_ij"],
+ num_bootstrap_samples,
+ confidence_level,
+ first_order_sobol=computed_indices["sobol_i"],
+ scheme=second_order_scheme,
+ )
+
+ self.logger.info(
+ "UQpy: Confidence intervals for Second order Sobol indices computed successfully."
+ )
+
+ computed_indices["CI_sobol_ij"] = self.CI_sobol_ij
+
+ return computed_indices
+
+
+###################### Pick and Freeze Methods #####################
+
+"""
+
+These methods are also called by other sensitivity methods (such as Chatterjee,
+Cramer-von Mises) to estimate the Sobol indices and therefore are implemented as
+functions and not static methods in the Sobol class.
+
+
+#! Saltelli2002
+--------------------------------------------------------------------------------
+
+Sobol indices are estimated as per Theorem 2 in [3]_; refer to page 7 of
+[3]_ for details.
+
+Since there are several sets of function evaluations available,
+there are several ways to estimate E[Y]^2 and V[Y].
+Below we summarise the evaluations to be used as given in Theorem 2.
+
+# First-order indices:
+ - E[Y]^2 : f_A, f_B
+ - V[Y] : f_A
+ - S_i = ( <f_A, f_C_i>/N - E[Y]^2 ) / V[Y]
+
+
+# Second-order indices:
+ - Estimate 1:
+ - E[Y]^2 : f_C_l, f_D_l -> l = max(i,j)
+ - V[Y] : f_C_j or f_D_i
+ - V^c_ij = <f_D_i, f_C_j>
+
+ - Estimate 2:
+ - E[Y]^2 : f_C_l, f_D_l -> l = min(i,j)
+ - V[Y] : f_C_i or f_D_j
+ - V^c_ij = <f_D_j, f_C_i>
+
+ where:
+ S_ij = S^c_ij - S_i - S_j
+ S^c_ij = ( <f_D_i, f_C_j>/N - E[Y]^2 ) / V[Y] # Estimate 1
+ = ( <f_D_j, f_C_i>/N - E[Y]^2 ) / V[Y] # Estimate 2
+
+# Total-order indices:
+ - E[Y]^2 : f_B
+ - V[Y] : f_B
+ - S_T_i = 1 - ( <f_B, f_C_i>/N - E[Y]^2 ) / V[Y]
+
+For m=5, the Sobol indices are estimated as follows:
+First order indices: 2 estimates
+Second order indices: 2 estimates
+Total order indices: 2 estimates
+S_{-ij}: 2 estimates
++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+
+| | f_B | f_C_1 | f_C_2 | f_C_3 | f_C_4 | f_C_5 | f_D_1 | f_D_2 | f_D_3 | f_D_4 | f_D_5 | f_A |
++=======+========+=========+=========+=========+=========+========+=========+=========+=========+=========+=======+======+
+| f_B | V[Y] | | | | | | | | | | | |
++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+
+| f_C_1 | S_T_1 | V[Y] | | | | | | | | | | |
++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+
+| f_C_2 | S_T_2 | V^c_-12 | V[Y] | | | | | | | | | |
++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+
+| f_C_3 | S_T_3 | V^c_-13 | V^c_-23 | V[Y] | | | | | | | | |
++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+
+| f_C_4 | S_T_4 | V^c_-14 | V^c_-24 | V^c_-34 | V[Y] | | | | | | | |
++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+
+| f_C_5 | S_T_5 | V^c_-15 | V^c_-25 | V^c_-35 | V^c_-45 | V[Y] | | | | | | |
++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+
+| f_D_1 | S_1 | E^2[Y] | V^c_12 | V^c_13 | V^c_14 | V^c_15 | V[Y] | | | | | |
++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+
+| f_D_2 | S_2 | V^c_12 | E^2[Y] | V^c_23 | V^c_24 | V^c_25 | V^c_-12 | V[Y] | | | | |
++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+
+| f_D_3 | S_3 | V^c_13 | V^c_23 | E^2[Y] | V^c_34 | V^c_35 | V^c_-13 | V^c_-23 | V[Y] | | | |
++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+
+| f_D_4 | S_4 | V^c_14 | V^c_24 | V^c_34 | E^2[Y] | V^c_45 | V^c_-14 | V^c_-24 | V^c_-34 | V[Y] | | |
++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+
+| f_D_5 | S_5 | V^c_15 | V^c_25 | V^c_35 | V^c_45 | E^2[Y] | V^c_-15 | V^c_-25 | V^c_-35 | V^c_-45 | V[Y] | |
++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+
+| f_A | E^2[Y] | S_1 | S_2 | S_3 | S_4 | S_5 | S_T_1 | S_T_2 | S_T_3 | S_T_4 | S_T_5 | V[Y] |
++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+
+
+For m>5, we can follow the same procedure as above.
+
+For m = 4, the Sobol indices are estimated as follows:
+First order indices: 2 estimates
+Second order indices: 4 estimates
+Total order indices: 2 estimates
++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+
+| | f_B | f_C_1 | f_C_2 | f_C_3 | f_C_4 | f_D_1 | f_D_2 | f_D_3 | f_D_4 | f_A |
++=======+========+========+========+========+========+========+========+========+=======+======+
+| f_B | V[Y] | | | | | | | | | |
++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+
+| f_C_1 | S_T_1 | V[Y] | | | | | | | | |
++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+
+| f_C_2 | S_T_2 | V^c_34 | V[Y] | | | | | | | |
++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+
+| f_C_3 | S_T_3 | V^c_24 | V^c_14 | V[Y] | | | | | | |
++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+
+| f_C_4 | S_T_4 | V^c_23 | V^c_13 | V^c_12 | V[Y] | | | | | |
++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+
+| f_D_1 | S_1 | E^2[Y] | V^c_12 | V^c_13 | V^c_14 | V[Y] | | | | |
++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+
+| f_D_2 | S_2 | V^c_12 | E^2[Y] | V^c_23 | V^c_24 | V^c_34 | V[Y] | | | |
++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+
+| f_D_3 | S_3 | V^c_13 | V^c_23 | E^2[Y] | V^c_34 | V^c_24 | V^c_14 | V[Y] | | |
++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+
+| f_D_4 | S_4 | V^c_14 | V^c_24 | V^c_34 | E^2[Y] | V^c_23 | V^c_13 | V^c_12 | V[Y] | |
++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+
+| f_A | E^2[Y] | S_1 | S_2 | S_3 | S_4 | S_T_1 | S_T_2 | S_T_3 | S_T_4 | V[Y] |
++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+
+
+For m = 3, the Sobol indices are estimated as follows:
+First order indices: 4 estimates
+Second order indices: 2 estimates
+Total order indices: 2 estimates
++-------+--------+--------+--------+--------+-------+-------+-------+------+
+| | f_B | f_C_1 | f_C_2 | f_C_3 | f_D_1 | f_D_2 | f_D_3 | f_A |
++=======+========+========+========+========+=======+=======+=======+======+
+| f_B | V[Y] | | | | | | | |
++-------+--------+--------+--------+--------+-------+-------+-------+------+
+| f_C_1 | S_T_1 | V[Y] | | | | | | |
++-------+--------+--------+--------+--------+-------+-------+-------+------+
+| f_C_2 | S_T_2 | S_3 | V[Y] | | | | | |
++-------+--------+--------+--------+--------+-------+-------+-------+------+
+| f_C_3 | S_T_3 | S_2 | S_1 | V[Y] | | | | |
++-------+--------+--------+--------+--------+-------+-------+-------+------+
+| f_D_1 | S_1 | E^2[Y] | V^c_12 | V^c_13 | V[Y] | | | |
++-------+--------+--------+--------+--------+-------+-------+-------+------+
+| f_D_2 | S_2 | V^c_12 | E^2[Y] | V^c_23 | S_3 | V[Y] | | |
++-------+--------+--------+--------+--------+-------+-------+-------+------+
+| f_D_3 | S_3 | V^c_13 | V^c_23 | E^2[Y] | S_2 | S_1 | V[Y] | |
++-------+--------+--------+--------+--------+-------+-------+-------+------+
+| f_A | E^2[Y] | S_1 | S_2 | S_3 | S_T_1 | S_T_2 | S_T_3 | V[Y] |
++-------+--------+--------+--------+--------+-------+-------+-------+------+
+
+"""
+
+
+def compute_first_order(
+ A_model_evals,
+ B_model_evals,
+ C_i_model_evals,
+ D_i_model_evals=None,
+ scheme="Janon2014",
+):
+
+ """
+ Compute first order Sobol' indices using the Pick-and-Freeze scheme.
+
+ For the Sobol1993 scheme:
+ For computing the first order Sobol' indices, only A_model_evals and
+ C_i_model_evals are required; the other inputs are optional.
+ If B_model_evals is not provided, it is treated as None.
+
+ **Inputs:**
+
+ * **A_model_evals** (`ndarray`):
+ Shape: `(n_samples, n_outputs)`.
+
+ * **B_model_evals** (`ndarray`):
+ If not available, pass `None`.
+ Shape: `(n_samples, n_outputs)`.
+
+ * **C_i_model_evals** (`ndarray`):
+ Shape: `(n_outputs, n_samples, num_vars)`.
+
+ * **D_i_model_evals** (`ndarray`, optional):
+ Shape: `(n_outputs, n_samples, num_vars)`.
+
+ * **scheme** (`str`, optional):
+ Scheme to use for computing the first order Sobol' indices.
+ Default: 'Janon2014'.
+
+ **Outputs:**
+
+ * **first_order_sobol** (`ndarray`):
+ First order Sobol' indices.
+ Shape: `(num_vars, n_outputs)`.
+
+ """
+
+ n_samples = A_model_evals.shape[0]
+ n_outputs = A_model_evals.shape[1]
+ num_vars = C_i_model_evals.shape[2]
+
+ # Store first order Sobol' indices
+ first_order_sobol = np.zeros((num_vars, n_outputs))
+
+ if scheme == "Sobol1993":
+
+ for output_j in range(n_outputs):
+
+ f_A = A_model_evals[:, output_j]
+ f_B = B_model_evals[:, output_j] if B_model_evals is not None else None
+
+ # combine all model evaluations
+ # to improve accuracy of the estimator
+ _all_model_evals = np.append(f_A, f_B) if f_B is not None else f_A
+ f_0 = np.mean(_all_model_evals) # scalar
+
+ f_0_square = f_0**2
+ total_variance = np.var(_all_model_evals, ddof=1)
+
+ for var_i in range(num_vars):
+
+ f_C_i = C_i_model_evals[output_j, :, var_i]
+
+ S_i = (np.dot(f_A, f_C_i) / n_samples - f_0_square) / total_variance
+
+ first_order_sobol[var_i, output_j] = S_i
+
+ elif scheme == "Janon2014":
+
+ for output_j in range(n_outputs):
+
+ f_A = A_model_evals[:, output_j]
+
+ for var_i in range(num_vars):
+
+ f_C_i = C_i_model_evals[output_j, :, var_i]
+
+ # combine all model evaluations
+ # to improve accuracy of the estimator
+ _all_model_evals = np.append(f_A, f_C_i)
+ f_0 = np.mean(_all_model_evals)
+
+ f_0_square = f_0**2
+ total_variance = np.mean(_all_model_evals**2) - f_0_square
+
+ S_i = (np.dot(f_A, f_C_i) / n_samples - f_0_square) / total_variance
+
+ first_order_sobol[var_i, output_j] = S_i
+
+ elif scheme == "Saltelli2002":
+
+ """
+ Number of estimates for first order indices is 4 if
+ num_vars is 3, else 2.
+
+ """
+
+ for output_j in range(n_outputs):
+
+ f_A = A_model_evals[:, output_j]
+ f_B = B_model_evals[:, output_j]
+ f_0_square = np.dot(f_A, f_B) / n_samples
+ total_variance = np.var(f_A, ddof=1)
+
+ for var_i in range(num_vars):
+
+ f_C_i = C_i_model_evals[output_j, :, var_i]
+ f_D_i = D_i_model_evals[output_j, :, var_i]
+
+ # (Estimate 1)
+ est_1 = (np.dot(f_A, f_C_i) / n_samples - f_0_square) / total_variance
+
+ # (Estimate 2)
+ est_2 = (np.dot(f_B, f_D_i) / n_samples - f_0_square) / total_variance
+
+ if num_vars == 3:
+
+ # list of variable indices
+ list_vars = list(range(num_vars))
+ list_vars.remove(var_i)
+ # combination of all remaining variables indices
+ rem_vars_perm = list(itertools.permutations(list_vars, 2))
+
+ # (Estimate 3)
+ var_a, var_b = rem_vars_perm[0]
+ f_C_a = C_i_model_evals[output_j, :, var_a]
+ f_C_b = C_i_model_evals[output_j, :, var_b]
+ est_3 = (
+ np.dot(f_C_a, f_C_b) / n_samples - f_0_square
+ ) / total_variance
+
+ # (Estimate 4)
+ var_a, var_b = rem_vars_perm[1]
+ f_D_a = D_i_model_evals[output_j, :, var_a]
+ f_D_b = D_i_model_evals[output_j, :, var_b]
+ est_4 = (
+ np.dot(f_D_a, f_D_b) / n_samples - f_0_square
+ ) / total_variance
+
+ first_order_sobol[var_i, output_j] = (
+ est_1 + est_2 + est_3 + est_4
+ ) / 4
+
+ else:
+ first_order_sobol[var_i, output_j] = (est_1 + est_2) / 2
+
+ return first_order_sobol
+
+
+def compute_total_order(
+ A_model_evals,
+ B_model_evals,
+ C_i_model_evals,
+ D_i_model_evals=None,
+ scheme="Homma1996",
+):
+
+ """
+ Compute total order Sobol' indices using the Pick-and-Freeze scheme.
+
+ For the Homma1996 scheme:
+ For computing the total order Sobol' indices, only B_model_evals and
+ C_i_model_evals are required.
+ If A_model_evals is not provided, it is treated as None.
+
+ **Inputs:**
+
+ * **A_model_evals** (`ndarray`):
+ If not available, pass `None`.
+ Shape: `(n_samples, n_outputs)`.
+
+ * **B_model_evals** (`ndarray`):
+ Shape: `(n_samples, n_outputs)`.
+
+ * **C_i_model_evals** (`ndarray`):
+ Shape: `(n_outputs, n_samples, num_vars)`.
+
+ * **D_i_model_evals** (`ndarray`, optional):
+ Shape: `(n_outputs, n_samples, num_vars)`.
+
+ * **scheme** (`str`, optional):
+ Scheme to use for computing the total order Sobol' indices.
+ Default: 'Homma1996'.
+
+ **Outputs:**
+
+ * **total_order_sobol** (`ndarray`):
+ Total order Sobol' indices.
+ Shape: `(num_vars, n_outputs)`.
+
+ """
+
+ n_samples = A_model_evals.shape[0]
+ n_outputs = A_model_evals.shape[1]
+ num_vars = C_i_model_evals.shape[2]
+
+ # Store total order Sobol' indices
+ total_order_sobol = np.zeros((num_vars, n_outputs))
+
+ if scheme == "Homma1996":
+
+ for output_j in range(n_outputs):
+
+ f_A = A_model_evals[:, output_j] if A_model_evals is not None else None
+ f_B = B_model_evals[:, output_j]
+
+ # combine all model evaluations
+ # to improve accuracy of the estimator
+ _all_model_evals = np.append(f_A, f_B) if f_A is not None else f_B
+ f_0 = np.mean(_all_model_evals) # scalar
+
+ f_0_square = f_0**2
+ total_variance = np.var(_all_model_evals, ddof=1)
+
+ for var_i in range(num_vars):
+
+ f_C_i = C_i_model_evals[output_j, :, var_i]
+
+ S_T_i = (
+ 1 - (np.dot(f_B, f_C_i) / n_samples - f_0_square) / total_variance
+ )
+
+ total_order_sobol[var_i, output_j] = S_T_i
+
+ elif scheme == "Saltelli2002":
+
+ for output_j in range(n_outputs):
+
+ f_A = A_model_evals[:, output_j]
+ f_B = B_model_evals[:, output_j]
+ f_0_square = np.mean(f_B) ** 2
+ total_variance = np.var(f_B, ddof=1)
+
+ for var_i in range(num_vars):
+
+ f_C_i = C_i_model_evals[output_j, :, var_i]
+ f_D_i = D_i_model_evals[output_j, :, var_i]
+
+ # (Estimate 1)
+ est_1 = (
+ 1 - (np.dot(f_B, f_C_i) / n_samples - f_0_square) / total_variance
+ )
+
+ # (Estimate 2)
+ est_2 = (
+ 1 - (np.dot(f_A, f_D_i) / n_samples - f_0_square) / total_variance
+ )
+
+ total_order_sobol[var_i, output_j] = (est_1 + est_2) / 2
+
+ return total_order_sobol
+
+
+def compute_second_order(
+ A_model_evals,
+ B_model_evals,
+ C_i_model_evals,
+ D_i_model_evals,
+ first_order_sobol=None, # None to make it a keyword argument
+ scheme="Saltelli2002",
+):
+ """
+ Compute the second order Sobol indices using the Pick-and-Freeze scheme.
+
+ NOTE:
+ - Number of estimates for second order indices is 4 if
+ num_vars is 4, else 2.
+
+ - Although B_model_evals is not currently used, it is
+ included for use in estimates 3 and 4 for the case num_vars = 4.
+
+ **Inputs:**
+
+ * **A_model_evals** (`ndarray`):
+ Shape: `(n_samples, n_outputs)`.
+
+ * **B_model_evals** (`ndarray`):
+ Shape: `(n_samples, n_outputs)`.
+
+ * **C_i_model_evals** (`ndarray`):
+ Shape: `(n_outputs, n_samples, num_vars)`.
+
+ * **D_i_model_evals** (`ndarray`, optional):
+ Shape: `(n_outputs, n_samples, num_vars)`.
+
+ * **first_order_sobol** (`ndarray`):
+ First order Sobol' indices.
+ Shape: `(num_vars, n_outputs)`.
+
+ * **scheme** (`str`, optional):
+ Scheme to use for computing the second order Sobol' indices.
+ Default: 'Saltelli2002'.
+
+ **Outputs:**
+
+ * **second_order_sobol** (`ndarray`):
+ Second order Sobol indices.
+ Shape: `(num_second_order_terms, n_outputs)`.
+ """
+
+ n_samples = A_model_evals.shape[0]
+ n_outputs = A_model_evals.shape[1]
+ num_vars = C_i_model_evals.shape[2]
+
+ second_order_terms = itertools.combinations(range(num_vars), 2)
+ second_order_terms = list(second_order_terms)
+ num_second_order_terms = math.comb(num_vars, 2)
+
+ # Store second order Sobol' indices
+ second_order_sobol = np.zeros((num_second_order_terms, n_outputs))
+
+ if scheme == "Saltelli2002":
+
+ for output_j in range(n_outputs):
+
+ for k in range(num_second_order_terms):
+
+ var_a, var_b = second_order_terms[k]
+ S_a = first_order_sobol[var_a, output_j]
+ S_b = first_order_sobol[var_b, output_j]
+
+ # (Estimate 1)
+ var_c = np.max([var_a, var_b])
+ f_C_c = C_i_model_evals[output_j, :, var_c]
+ f_D_c = D_i_model_evals[output_j, :, var_c]
+ f_0_square = np.dot(f_D_c, f_C_c) / n_samples
+ total_variance = np.var(f_D_c, ddof=1)
+
+ f_C_a = C_i_model_evals[output_j, :, var_a]
+ f_D_b = D_i_model_evals[output_j, :, var_b]
+ S_c_ab_1 = (
+ np.dot(f_C_a, f_D_b) / n_samples - f_0_square
+ ) / total_variance
+
+ est_1 = S_c_ab_1 - S_a - S_b
+
+ # (Estimate 2)
+ var_c = np.min([var_a, var_b])
+ f_C_c = C_i_model_evals[output_j, :, var_c]
+ f_D_c = D_i_model_evals[output_j, :, var_c]
+ f_0_square = np.dot(f_D_c, f_C_c) / n_samples
+ total_variance = np.var(f_D_c, ddof=1)
+
+ f_D_a = D_i_model_evals[output_j, :, var_a]
+ f_C_b = C_i_model_evals[output_j, :, var_b]
+ S_c_ab_2 = (
+ np.dot(f_D_a, f_C_b) / n_samples - f_0_square
+ ) / total_variance
+
+ est_2 = S_c_ab_2 - S_a - S_b
+
+ if num_vars == 4:
+
+ # (Estimate 3)
+ # TODO: How to compute this?
+
+ # (Estimate 4)
+ # TODO: How to compute this?
+
+ # second_order_sobol[k, output_j] = (
+ # est_1 + est_2 + est_3 + est_4
+ # ) / 4
+
+ pass
+
+ else:
+ second_order_sobol[k, output_j] = (est_1 + est_2) / 2
+
+ return second_order_sobol
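Editor's sketch (not part of the patch): an end-to-end driver for the class added above, condensed from the fixtures in the unit tests of the next patch. The `ishigami.py` model script is the one defined there; the sample sizes are illustrative assumptions, not recommended values.

    import numpy as np

    from UQpy.run_model.RunModel import RunModel
    from UQpy.run_model.model_execution.PythonModel import PythonModel
    from UQpy.distributions import Uniform
    from UQpy.distributions.collection.JointIndependent import JointIndependent
    from UQpy.sensitivity.sobol import Sobol

    # Ishigami model wrapped in RunModel (ishigami.py defines `evaluate`, see the tests).
    model = PythonModel(
        model_script="ishigami.py",
        model_object_name="evaluate",
        var_names=["X1", "X2", "X3"],
        delete_files=True,
        params=[7, 0.1],
    )
    runmodel_obj = RunModel(model=model)

    dist_obj = JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3)

    SA = Sobol(runmodel_obj, dist_obj)

    np.random.seed(12345)  # for reproducibility

    # run() returns a dict; confidence intervals are included only when
    # num_bootstrap_samples is given.
    indices = SA.run(n_samples=50_000, num_bootstrap_samples=500)

    print(indices["sobol_i"])        # first order indices, shape (num_vars, n_outputs)
    print(indices["sobol_total_i"])  # total order indices, shape (num_vars, n_outputs)
    print(indices["CI_sobol_i"])     # shape (num_vars, 2) for single-output models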
From 18ed2f2fe4e6dbb446142c9e8b1255906a382b45 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sat, 7 May 2022 16:07:23 +0200
Subject: [PATCH 02/59] Add unit tests for sobol & sensitivity baseclass
---
tests/unit_tests/sensitivity/ishigami.py | 17 +
tests/unit_tests/sensitivity/sobol_func.py | 35 ++
.../unit_tests/sensitivity/test_baseclass.py | 234 ++++++++++
tests/unit_tests/sensitivity/test_sobol.py | 399 ++++++++++++++++++
4 files changed, 685 insertions(+)
create mode 100644 tests/unit_tests/sensitivity/ishigami.py
create mode 100644 tests/unit_tests/sensitivity/sobol_func.py
create mode 100644 tests/unit_tests/sensitivity/test_baseclass.py
create mode 100644 tests/unit_tests/sensitivity/test_sobol.py
diff --git a/tests/unit_tests/sensitivity/ishigami.py b/tests/unit_tests/sensitivity/ishigami.py
new file mode 100644
index 000000000..41ec55149
--- /dev/null
+++ b/tests/unit_tests/sensitivity/ishigami.py
@@ -0,0 +1,17 @@
+"""
+Auxiliary file
+==============================================
+"""
+
+import numpy as np
+
+def evaluate(X, params=[7, 0.1]):
+ """Non-monotonic Ishigami-Homma three parameter test function"""
+
+ a = params[0]
+ b = params[1]
+
+ Y = np.sin(X[:, 0]) + a * np.power(np.sin(X[:, 1]), 2) + \
+ b * np.power(X[:, 2], 4) * np.sin(X[:, 0])
+
+ return Y
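Editor's sketch (not part of the patch): a quick sanity check of the helper above. The input point is chosen so that only the middle term contributes.

    import numpy as np

    from ishigami import evaluate  # assumes this script sits next to ishigami.py

    # sin(0) = 0, so only the a*sin(X2)^2 term contributes: Y = 7 * sin(pi/2)^2 = 7.
    X = np.array([[0.0, np.pi / 2, 1.0]])
    print(evaluate(X))  # [7.]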
diff --git a/tests/unit_tests/sensitivity/sobol_func.py b/tests/unit_tests/sensitivity/sobol_func.py
new file mode 100644
index 000000000..af1636315
--- /dev/null
+++ b/tests/unit_tests/sensitivity/sobol_func.py
@@ -0,0 +1,35 @@
+import numpy as np
+import copy
+
+
+def evaluate(X, a_values):
+
+ dims = len(a_values)
+ g = 1
+
+ for i in range(dims):
+ g_i = (np.abs(4 * X[:, i] - 2) + a_values[i]) / (1 + a_values[i])
+ g *= g_i
+
+ return g
+
+
+def sensitivities(a_values):
+
+ dims = len(a_values)
+
+ Total_order = np.zeros((dims, 1))
+
+ V_i = (3 * (1 + a_values) ** 2) ** (-1)
+
+ total_variance = np.prod(1 + V_i) - 1
+
+ First_order = V_i / total_variance
+
+ for i in range(dims):
+
+ rem_First_order = copy.deepcopy(V_i)
+ rem_First_order[i] = 0
+ Total_order[i] = V_i[i] * np.prod(rem_First_order + 1) / total_variance
+
+ return First_order.reshape(-1, 1), Total_order
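Editor's sketch (not part of the patch): how the two helpers above relate, using the same `a_values` as the g-function fixture defined further below.

    import numpy as np

    from sobol_func import evaluate, sensitivities  # assumes this script sits next to sobol_func.py

    a_vals = np.array([0.0, 0.5, 3.0, 9.0, 99.0, 99.0])  # same a_values as the g-function fixture

    # Analytical first and total order indices, each of shape (6, 1).
    first_order, total_order = sensitivities(a_vals)
    print(first_order.ravel())
    print(total_order.ravel())

    # evaluate() acts row-wise on an (n_samples, 6) array of inputs in [0, 1].
    X = np.random.default_rng(123).uniform(size=(4, 6))
    print(evaluate(X, a_vals).shape)  # (4,)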
diff --git a/tests/unit_tests/sensitivity/test_baseclass.py b/tests/unit_tests/sensitivity/test_baseclass.py
new file mode 100644
index 000000000..458826d6f
--- /dev/null
+++ b/tests/unit_tests/sensitivity/test_baseclass.py
@@ -0,0 +1,234 @@
+"""
+This module is used to test the functionalities of the baseclass.
+
+- test_pick_and_freeze_sampling:
+ Test the `generate_pick_freeze_samples` function.
+- test_bootstrap_for_vector:
+ Test the bootstrap sampling for a vector.
+- test_bootstrap_for_matrix:
+ Test the bootstrap sampling for a matrix.
+
+"""
+
+import numpy as np
+import pytest
+
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Uniform
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.sobol import Sobol
+from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples
+
+# Prepare
+###############################################################################
+
+# Prepare the input distribution
+@pytest.fixture()
+def ishigami_input_dist_object():
+ """
+ This function returns the input distribution for the Ishigami function.
+
+ X1 ~ Uniform(-pi, pi)
+ X2 ~ Uniform(-pi, pi)
+ X3 ~ Uniform(-pi, pi)
+
+ """
+ return JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3)
+
+
+@pytest.fixture()
+def ishigami_model_object():
+ """This function creates the Ishigami run_model_object"""
+ model = PythonModel(
+ model_script="ishigami.py",
+ model_object_name="evaluate",
+ var_names=[r"$X_1$", "$X_2$", "$X_3$"],
+ delete_files=True,
+ params=[7, 0.1],
+ )
+
+ runmodel_obj = RunModel(model=model)
+
+ return runmodel_obj
+
+
+@pytest.fixture()
+def sobol_object(ishigami_model_object, ishigami_input_dist_object):
+ """This function returns the Sobol object."""
+
+ return Sobol(ishigami_model_object, ishigami_input_dist_object)
+
+
+@pytest.fixture()
+def sobol_object_input_samples_small(sobol_object):
+ """This creates the Sobol object."""
+
+ SA = sobol_object
+
+ np.random.seed(12345) # set seed for reproducibility
+
+ SA.n_samples = 2
+
+ return generate_pick_freeze_samples(SA.dist_object, SA.n_samples)
+
+
+# Generate N pick and freeze samples
+@pytest.fixture()
+def pick_and_freeze_samples_small():
+ """
+ This function returns input matrices A, B and C_i with a small number
+ of samples for the Ishigami input distribution.
+ This is used to test the `generate_pick_freeze_samples` function.
+
+ The samples are generated as follows:
+
+ dist_1 = JointInd([Uniform(-np.pi, 2*np.pi)]*3)
+
+ np.random.seed(12345) #! set seed for reproducibility
+
+ n_samples = 2
+ n_vars = 3
+
+ samples = dist_1.rvs(n_samples*2)
+
+ # Split samples
+ A_samples = samples[:n_samples, :]
+ B_samples = samples[n_samples:, :]
+
+ def _get_C_i(i, A, B):
+ C_i = copy.deepcopy(B)
+ C_i[:, i] = A[:, i]
+ return C_i
+
+ C_samples = np.zeros((n_vars, n_samples, n_vars))
+
+ for i in range(3):
+ C_samples[i, :, :] = _get_C_i(i, A_samples, B_samples)
+
+ print(np.around(A_samples,3))
+ print(np.around(B_samples,3))
+ print(np.around(C_samples,3))
+
+ """
+
+ A_samples = np.array([[2.699, 0.426, 1.564], [-1.154, 0.600, 0.965]])
+
+ B_samples = np.array([[-1.986, 2.919, 1.556], [-1.856, 0.962, 2.898]])
+
+ C_samples = np.array(
+ [
+ [[2.699, 2.919, 1.556], [-1.154, 0.962, 2.898]],
+ [[-1.986, 0.426, 1.556], [-1.856, 0.6, 2.898]],
+ [[-1.986, 2.919, 1.564], [-1.856, 0.962, 0.965]],
+ ]
+ )
+
+ return A_samples, B_samples, C_samples
+
+
+@pytest.fixture()
+def random_f_A():
+ """This function returns an A-like vector"""
+
+ rand_f_A = np.array([[100], [101], [102], [103], [104]])
+
+ return rand_f_A
+
+
+@pytest.fixture()
+def random_f_C_i():
+ """This function returns a C_i-like vector"""
+
+ rand_f_C_i = np.array([[100, 200], [101, 201], [102, 202], [103, 203], [104, 204]])
+ return rand_f_C_i
+
+
+@pytest.fixture()
+def manual_bootstrap_samples_f_A():
+ """This function bootstraps the A-like vector using random indices"""
+
+ # Generated using np.random.randint(low=0, high=5, size=(5,1))
+ # with np.random.seed(12345)
+ # rand_indices_f_A = np.array([ [2],
+ # [1],
+ # [4],
+ # [1],
+ # [2]])
+
+ # bootstrap_f_A = rand_f_A[rand_indices_f_A]
+ bootstrap_sample_A = np.array([[102], [101], [104], [101], [102]])
+
+ return bootstrap_sample_A
+
+
+@pytest.fixture()
+def manual_bootstrap_samples_f_C_i():
+ """This function bootstraps the C_i-like vector using random indices"""
+
+ # Generated using np.random.randint(low=0, high=5, size=(5,2))
+ # with np.random.seed(12345)
+ # rand_indices_C_i = np.array([ [2, 1],
+ # [4, 1],
+ # [2, 1],
+ # [1, 3],
+ # [1, 3]])
+
+ bootstrap_f_C_i = np.array(
+ [[102, 201], [104, 201], [102, 201], [101, 203], [101, 203]]
+ )
+
+ return bootstrap_f_C_i
+
+
+# Unit tests
+###############################################################################
+
+
+def test_pick_and_freeze_sampling(
+ pick_and_freeze_samples_small, sobol_object_input_samples_small
+):
+
+ """Test the `generate_pick_and_test_samples` function."""
+
+ # Prepare
+ A_samples, B_samples, C_samples = pick_and_freeze_samples_small
+ A_test, B_test, C_test_generator, _ = sobol_object_input_samples_small
+
+ # Act
+ assert np.allclose(A_samples, np.around(A_test, 3))
+ assert np.allclose(B_samples, np.around(B_test, 3))
+
+ for i in range(3):
+ C_test = next(C_test_generator)
+ assert np.allclose(C_samples[i, :, :], np.around(C_test, 3))
+
+
+def test_bootstrap_for_vector(random_f_A, manual_bootstrap_samples_f_A):
+
+ """Test the bootstrap sampling for a vector."""
+
+ # Prepare
+ np.random.seed(12345) #! set seed for reproducibility
+
+ gen_f_A = Sobol.bootstrap_sample_generator_1D(random_f_A)
+
+ bootstrap_samples_f_A = next(gen_f_A)
+
+ # Act
+ assert np.array_equal(manual_bootstrap_samples_f_A, bootstrap_samples_f_A)
+
+
+def test_bootstrap_for_matrix(random_f_C_i, manual_bootstrap_samples_f_C_i):
+
+ """Test the bootstrap sampling for a matrix."""
+
+ # Prepare
+ np.random.seed(12345) #! set seed for reproducibility
+
+ gen_f_C_i = Sobol.bootstrap_sample_generator_2D(random_f_C_i)
+
+ bootstrap_samples_C_i = next(gen_f_C_i)
+
+ # Act
+ assert np.array_equal(manual_bootstrap_samples_f_C_i, bootstrap_samples_C_i)
diff --git a/tests/unit_tests/sensitivity/test_sobol.py b/tests/unit_tests/sensitivity/test_sobol.py
new file mode 100644
index 000000000..64882a155
--- /dev/null
+++ b/tests/unit_tests/sensitivity/test_sobol.py
@@ -0,0 +1,399 @@
+"""
+This is the test module for Sobol sensitivity indices.
+
+Here, we will use the Ishigami function to test the output.
+
+The following methods are tested:
+1. generate_pick_and_freeze_samples
+2. pick_and_freeze_estimator (First and Total order Sobol indices)
+3. pick_and_freeze_estimator (Second order Sobol indices) using [1]_.
+
+References
+----------
+
+.. [1] Graham Glen, Kristin Isaacs, Estimating Sobol sensitivity indices using
+ correlations, Environmental Modelling & Software, Volume 37, 2012, Pages 157-166,
+ ISSN 1364-8152, https://doi.org/10.1016/j.envsoft.2012.03.014.
+
+
+Important
+----------
+The computed indices are compared with the analytical values using the `np.isclose` function.
+
+Function signature:
+ numpy.isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False)
+
+ Parameters:
+ a, b: array_like
+ Input arrays to compare.
+
+ rtol: float
+ The relative tolerance parameter.
+
+ atol: float
+ The absolute tolerance parameter.
+
+Each element of the `diff` array is compared as follows:
+diff = |a - b|
+diff <= atol + rtol * abs(b)
+
+- relative tolerance: rtol * abs(b)
+ It is the maximum allowed difference between a and b,
+ relative to the absolute value of b.
+ For example, to set a relative tolerance of 1%, pass rtol=0.01,
+ which ensures that a and b agree to within 1% of abs(b).
+- absolute tolerance: atol
+ When b is close to zero, the atol value is used.
+
+"""
+
+import ntpath
+import numpy as np
+import pytest
+import scipy
+
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Uniform
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.sobol import Sobol
+
+# Prepare
+###############################################################################
+
+# Prepare the input distribution
+@pytest.fixture()
+def ishigami_input_dist_object():
+ """
+ This function returns the input distribution for the Ishigami function.
+
+ X1 ~ Uniform(-pi, pi)
+ X2 ~ Uniform(-pi, pi)
+ X3 ~ Uniform(-pi, pi)
+
+ """
+ return JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3)
+
+
+@pytest.fixture()
+def ishigami_model_object():
+ """This function creates the Ishigami run_model_object"""
+ model = PythonModel(
+ model_script="ishigami.py",
+ model_object_name="evaluate",
+ var_names=[r"$X_1$", "$X_2$", "$X_3$"],
+ delete_files=True,
+ params=[7, 0.1],
+ )
+
+ runmodel_obj = RunModel(model=model)
+
+ return runmodel_obj
+
+
+@pytest.fixture()
+def sobol_object(ishigami_model_object, ishigami_input_dist_object):
+ """This function returns the Sobol object."""
+
+ return Sobol(ishigami_model_object, ishigami_input_dist_object)
+
+
+@pytest.fixture()
+def analytical_ishigami_Sobol_indices():
+ """
+ Analytical Sobol indices for the Ishigami function.
+
+ Copy-paste the following to reproduce the given indices:
+
+ a = 7
+ b = 0.1
+
+ V1 = 0.5*(1 + (b*np.pi**4)/5)**2
+ V2 = (a**2)/8
+ V3 = 0
+
+ VT3 = (8*(b**2)*np.pi**8)/225
+ VT1 = V1 + VT3
+ VT2 = V2
+
+ total_variance = V2 + (b*np.pi**4)/5 + ((b**2) * np.pi**8)/18 + 0.5
+
+ S = np.array([V1, V2, V3])/total_variance
+ S_T = np.array([VT1, VT2, VT3])/total_variance
+
+ S = np.around(S, 4)
+ S_T = np.around(S_T, 4)
+
+ """
+
+ S1 = 0.3139
+ S2 = 0.4424
+ S3 = 0
+
+ S_T1 = 0.5576
+ S_T2 = 0.4424
+ S_T3 = 0.2437
+
+ S = np.array([S1, S2, S3])
+ S_T = np.array([S_T1, S_T2, S_T3])
+
+ return S.reshape(-1, 1), S_T.reshape(-1, 1)
+
+
+@pytest.fixture()
+def saltelli_ishigami_Sobol_indices(sobol_object):
+
+ SA = sobol_object
+
+ np.random.seed(12345) #! set seed for reproducibility
+
+ computed_indices = SA.run(n_samples=1_000_000)
+
+ return computed_indices["sobol_i"], computed_indices["sobol_total_i"]
+
+
+@pytest.fixture()
+def NUM_SAMPLES():
+ """This function returns the number of samples for bootstrapping"""
+
+ num_bootstrap_samples = 10_000
+ num_samples = 100_000
+
+ return num_bootstrap_samples, num_samples
+
+
+@pytest.fixture()
+def bootstrap_sobol_index_variance(sobol_object, NUM_SAMPLES):
+
+ #### SETUP ####
+ SA = sobol_object
+
+ np.random.seed(12345) #! set seed for reproducibility
+
+ confidence_level = 0.95
+ delta = -scipy.stats.norm.ppf((1 - confidence_level) / 2)
+
+ num_bootstrap_samples, n_samples = NUM_SAMPLES
+
+ #### Compute indices ####
+ computed_indices = SA.run(
+ n_samples=n_samples,
+ num_bootstrap_samples=num_bootstrap_samples,
+ confidence_level=confidence_level,
+ )
+
+ First_order = computed_indices["sobol_i"].ravel()
+ Total_order = computed_indices["sobol_total_i"].ravel()
+ CI_first_order = computed_indices["CI_sobol_i"]
+ CI_total_order = computed_indices["CI_sobol_total_i"]
+
+ #### Compute variance ####
+ upper_bound_first_order = CI_first_order[:, 1]
+ upper_bound_total_order = CI_total_order[:, 1]
+
+ std_bootstrap_first_order = (upper_bound_first_order - First_order) / delta
+ std_bootstrap_total_order = (upper_bound_total_order - Total_order) / delta
+
+ return std_bootstrap_first_order**2, std_bootstrap_total_order**2
+
+
+@pytest.fixture()
+def model_eval_sobol_index_variance():
+
+ """
+ For computational efficiency, the variance of the Sobol indices
+ is precomputed using model evaluations with
+ NUM_SAMPLES (num_repetitions=10_000, num_samples=100_000)
+
+ Copy-paste the following code to generate the variance
+ of the Sobol indices:
+
+ runmodel_obj = RunModel(
+ model_script='ishigami.py',
+ var_names=['X1', 'X2', 'X3'],
+ vec=True, delete_files=True)
+
+ input_obj = JointInd([Uniform(-np.pi, 2*np.pi)]*3)
+
+ SA = Sobol(runmodel_obj, input_obj)
+
+ np.random.seed(12345) # for reproducibility
+
+ num_repetitions, n_samples = 10_000, 100_000
+
+ num_vars = 3
+
+ sample_first_order = np.zeros((num_vars, num_repetitions))
+ sample_total_order = np.zeros((num_vars, num_repetitions))
+
+ for i in range(num_repetitions):
+ S, S_T = SA.run(n_samples=n_samples)
+
+ sample_first_order[:, i] = S.ravel()
+ sample_total_order[:, i] = S_T.ravel()
+
+ variance_first_order = np.var(sample_first_order, axis=1, ddof=1).reshape(-1, 1)
+ variance_total_order = np.var(sample_total_order, axis=1, ddof=1).reshape(-1, 1)
+
+ print(variance_first_order)
+ print(variance_total_order)
+
+ """
+
+ variance_first_order = np.array([1.98518409e-05, 1.69268227e-05, 2.50390610e-05])
+
+ variance_total_order = np.array([2.82995855e-05, 2.46373399e-05, 2.59811868e-05])
+
+ return variance_first_order, variance_total_order
+
+
+@pytest.fixture()
+def sobol_g_function_input_dist_object():
+ """
+ This function returns the input distribution object for the Sobol G function.
+
+ X1 ~ Uniform(0, 1)
+ X2 ~ Uniform(0, 1)
+ X3 ~ Uniform(0, 1)
+ X4 ~ Uniform(0, 1)
+ X5 ~ Uniform(0, 1)
+ X6 ~ Uniform(0, 1)
+
+ """
+
+ dist_object = JointIndependent([Uniform(0, 1)] * 6)
+
+ return dist_object
+
+
+@pytest.fixture()
+def sobol_g_function_model_object():
+ """This function creates the Sobol g-function model object"""
+
+ a_vals = np.array([0.0, 0.5, 3.0, 9.0, 99.0, 99.0])
+
+ model = PythonModel(
+ model_script="sobol_func.py",
+ model_object_name="evaluate",
+ delete_files=True,
+ a_values=a_vals,
+ )
+
+ runmodel_obj = RunModel(model=model)
+
+ return runmodel_obj
+
+
+@pytest.fixture()
+def sobol_object_g_func(
+ sobol_g_function_input_dist_object, sobol_g_function_model_object
+):
+ """This function creates the Sobol object for the g-function"""
+
+ sobol_object = Sobol(
+ sobol_g_function_model_object, sobol_g_function_input_dist_object
+ )
+
+ return sobol_object
+
+
+@pytest.fixture()
+def analytical_sobol_g_func_second_order_indices():
+ """
+ This function returns the analytical second order Sobol indices for the g-function
+
+ The values were obtained from [1]_.
+
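+    Note: for the g-function the partial variances factor as
+    V_ij = V_i * V_j, with V_i = 1 / (3 * (1 + a_i)**2), so each second
+    order index is S_ij = V_i * V_j / D, where D = prod(1 + V_i) - 1 is the
+    total variance (this reproduces, e.g., S12 = 0.0869305).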
+ """
+
+ S12 = 0.0869305
+ S13 = 0.0122246
+ S14 = 0.00195594
+ S15 = 0.00001956
+ S16 = 0.00001956
+ S23 = 0.00543316
+ S24 = 0.00086931
+ S25 = 0.00000869
+ S26 = 0.00000869
+ S34 = 0.00012225
+ S35 = 0.00000122
+ S36 = 0.00000122
+ S45 = 0.00000020
+ S46 = 0.00000020
+ S56 = 2.0e-9
+
+ S_2 = [S12, S13, S14, S15, S16, S23, S24, S25, S26, S34, S35, S36, S45, S46, S56]
+
+ return np.array(S_2).reshape(-1, 1)
+
+
+@pytest.fixture()
+def saltelli_sobol_g_function(sobol_object_g_func):
+
+ SA = sobol_object_g_func
+
+ np.random.seed(12345) #! set seed for reproducibility
+
+ # Compute Sobol indices using the pick and freeze algorithm
+ # Save only second order indices
+ computed_indices = SA.run(n_samples=100_000, estimate_second_order=True)
+
+ return computed_indices["sobol_ij"]
+
+
+# Unit tests
+###############################################################################
+
+
+def test_pick_and_freeze_estimator(
+ analytical_ishigami_Sobol_indices, saltelli_ishigami_Sobol_indices
+):
+
+ """
+ Test the Saltelli pick and freeze estimator using 1_000_000 samples.
+ """
+
+ # Prepare
+ S_analytical, S_T_analytical = analytical_ishigami_Sobol_indices
+ S_saltelli, S_T_saltelli = saltelli_ishigami_Sobol_indices
+
+ # Act
+ assert S_analytical.shape == S_saltelli.shape
+ assert S_T_analytical.shape == S_T_saltelli.shape
+    # Idea: Measure accuracy up to 2 decimal places -> rtol=0, atol=1e-2
+ assert np.isclose(S_saltelli, S_analytical, rtol=0, atol=1e-2).all()
+ assert np.isclose(S_T_saltelli, S_T_analytical, rtol=0, atol=1e-2).all()
+
+
+def test_bootstrap_variance_computation(
+ model_eval_sobol_index_variance, bootstrap_sobol_index_variance
+):
+
+ """Test the bootstrap variance computation."""
+
+ # Prepare
+ var_first, var_total = model_eval_sobol_index_variance
+ boot_var_first, boot_var_total = bootstrap_sobol_index_variance
+
+ # Act
+ assert var_first.shape == boot_var_first.shape
+ assert var_total.shape == boot_var_total.shape
+
+ # Idea: Ensure bootstrap variance and MC variance are of same order -> rtol=0, atol=1e-4
+ assert np.isclose(boot_var_first, var_first, rtol=0, atol=1e-4).all()
+ assert np.isclose(boot_var_total, var_total, rtol=0, atol=1e-4).all()
+
+
+def test_second_order_indices(
+ analytical_sobol_g_func_second_order_indices, saltelli_sobol_g_function
+):
+
+ """Test the second order indices computation."""
+
+ # Prepare
+ S_2_analytical = analytical_sobol_g_func_second_order_indices
+ S_2 = saltelli_sobol_g_function
+
+ # Act
+    # Idea: Ensure second order indices agree to 2 decimal places -> rtol=0, atol=1e-2
+ assert np.isclose(S_2, S_2_analytical, rtol=0, atol=1e-2).all()
From d2da7d124eb25e04e6aaf69f7fb3c5a182f64f9a Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sat, 7 May 2022 21:56:06 +0200
Subject: [PATCH 03/59] Fixed minor typo
---
docs/code/sensitivity/morris/README.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/code/sensitivity/morris/README.rst b/docs/code/sensitivity/morris/README.rst
index cb065129a..2d37d6599 100644
--- a/docs/code/sensitivity/morris/README.rst
+++ b/docs/code/sensitivity/morris/README.rst
@@ -3,7 +3,7 @@ Morris Screening
Consider a model of the sort :math:`Y=h(X)`, :math:`Y` is assumed to be scalar, :math:`X=[X_{1}, ..., X_{d}]`.
-For each input ;math:`X_{k}`, the elementary effect is computed as:
+For each input :math:`X_{k}`, the elementary effect is computed as:
.. math:: EE_{k} = \frac{Y(X_{1}, ..., X_{k}+\Delta, ..., X_{d})-Y(X_{1}, ..., X_{k}, ..., X_{d})}{\Delta}
From 2ae7cebbbd197323f05ec2dcbf34d684d5a8448c Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 14:20:25 +0200
Subject: [PATCH 04/59] Added modules in sensitivity __init__.py
---
src/UQpy/sensitivity/__init__.py | 3 +++
src/UQpy/sensitivity/baseclass/__init__.py | 2 ++
2 files changed, 5 insertions(+)
diff --git a/src/UQpy/sensitivity/__init__.py b/src/UQpy/sensitivity/__init__.py
index f7b7f2bdd..5a5b0d997 100644
--- a/src/UQpy/sensitivity/__init__.py
+++ b/src/UQpy/sensitivity/__init__.py
@@ -1,4 +1,7 @@
from UQpy.sensitivity.MorrisSensitivity import MorrisSensitivity
from UQpy.sensitivity.PceSensitivity import PceSensitivity
+from UQpy.sensitivity.sobol import Sobol
from . import MorrisSensitivity
+from . import PceSensitivity
+from . import Sobol
diff --git a/src/UQpy/sensitivity/baseclass/__init__.py b/src/UQpy/sensitivity/baseclass/__init__.py
index e69de29bb..7e11a2b63 100644
--- a/src/UQpy/sensitivity/baseclass/__init__.py
+++ b/src/UQpy/sensitivity/baseclass/__init__.py
@@ -0,0 +1,2 @@
+from UQpy.sensitivity.baseclass.sensitivity import *
+from UQpy.sensitivity.baseclass.pickfreeze import *
From 23c0607cf9e4e7a625d893e872177dd7e315c595 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 14:21:05 +0200
Subject: [PATCH 05/59] Formatted conf.py with Black
---
docs/source/conf.py | 175 +++++++++++++++++++++++---------------------
1 file changed, 93 insertions(+), 82 deletions(-)
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 77b7351f1..318e66625 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -51,14 +51,14 @@
# nbsphinx_custom_formats={
# ".md": ["jupytext.reads", {"fmt": "mystnb"}]
# }
-autoclass_content = 'init'
+autoclass_content = "init"
add_module_names = False
-autodoc_member_order = 'bysource'
+autodoc_member_order = "bysource"
napoleon_use_param = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
-bibtex_bibfiles = ['bibliography.bib']
-bibtex_default_style = 'unsrt'
+bibtex_bibfiles = ["bibliography.bib"]
+bibtex_default_style = "unsrt"
# Try to remove duplicate labels
autosectionlabel_prefix_document = True
@@ -69,83 +69,89 @@
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "Model_Runs**"]
sphinx_gallery_conf = {
- 'examples_dirs': ['../code/dimension_reduction/diffusion_maps',
- '../code/dimension_reduction/pod',
- '../code/dimension_reduction/grassmann',
- '../code/distributions/continuous_1d',
- '../code/distributions/discrete_1d',
- '../code/distributions/multivariate',
- '../code/distributions/user_defined',
- '../code/sampling/adaptive_kriging',
- '../code/sampling/importance_sampling',
- '../code/sampling/monte_carlo',
- '../code/sampling/latin_hypercube',
- '../code/sampling/mcmc',
- '../code/sampling/simplex',
- '../code/sampling/true_stratified_sampling',
- '../code/sampling/refined_stratified_sampling',
- '../code/inference/mle',
- '../code/inference/info_model_selection',
- '../code/inference/bayes_parameter_estimation',
- '../code/inference/bayes_model_selection',
- '../code/transformations/nataf',
- '../code/sensitivity/morris',
- '../code/stochastic_processes/bispectral',
- '../code/stochastic_processes/karhunen_loeve',
- '../code/stochastic_processes/spectral',
- '../code/stochastic_processes/translation',
- '../code/reliability/form',
- '../code/reliability/sorm',
- '../code/reliability/subset_simulation',
- '../code/surrogates/srom',
- '../code/surrogates/gpr',
- '../code/surrogates/pce',
- '../code/RunModel',], # path to your example scripts,
- 'gallery_dirs': ['auto_examples/dimension_reduction/diffusion_maps',
- 'auto_examples/dimension_reduction/pod',
- 'auto_examples/dimension_reduction/grassmann',
- 'auto_examples/distributions/continuous_1d',
- 'auto_examples/distributions/discrete_1d',
- 'auto_examples/distributions/multivariate',
- 'auto_examples/distributions/user_defined',
- 'auto_examples/sampling/adaptive_kriging',
- 'auto_examples/sampling/importance_sampling',
- 'auto_examples/sampling/monte_carlo',
- 'auto_examples/sampling/latin_hypercube',
- 'auto_examples/sampling/mcmc',
- 'auto_examples/sampling/simplex',
- 'auto_examples/sampling/true_stratified_sampling',
- 'auto_examples/sampling/refined_stratified_sampling',
- 'auto_examples/inference/mle',
- 'auto_examples/inference/info_model_selection',
- 'auto_examples/inference/bayes_parameter_estimation',
- 'auto_examples/inference/bayes_model_selection',
- 'auto_examples/transformations/nataf',
- 'auto_examples/sensitivity/morris',
- 'auto_examples/stochastic_processes/bispectral',
- 'auto_examples/stochastic_processes/karhunen_loeve',
- 'auto_examples/stochastic_processes/spectral',
- 'auto_examples/stochastic_processes/translation',
- 'auto_examples/reliability/form',
- 'auto_examples/reliability/sorm',
- 'auto_examples/reliability/subset_simulation',
- 'auto_examples/surrogates/srom',
- 'auto_examples/surrogates/gpr',
- 'auto_examples/surrogates/pce',
- 'auto_examples/RunModel',], # path to where to save gallery generated output
- 'binder': {
+ "examples_dirs": [
+ "../code/dimension_reduction/diffusion_maps",
+ "../code/dimension_reduction/pod",
+ "../code/dimension_reduction/grassmann",
+ "../code/distributions/continuous_1d",
+ "../code/distributions/discrete_1d",
+ "../code/distributions/multivariate",
+ "../code/distributions/user_defined",
+ "../code/sampling/adaptive_kriging",
+ "../code/sampling/importance_sampling",
+ "../code/sampling/monte_carlo",
+ "../code/sampling/latin_hypercube",
+ "../code/sampling/mcmc",
+ "../code/sampling/simplex",
+ "../code/sampling/true_stratified_sampling",
+ "../code/sampling/refined_stratified_sampling",
+ "../code/inference/mle",
+ "../code/inference/info_model_selection",
+ "../code/inference/bayes_parameter_estimation",
+ "../code/inference/bayes_model_selection",
+ "../code/transformations/nataf",
+ "../code/sensitivity/morris",
+ "../code/sensitivity/sobol",
+ "../code/stochastic_processes/bispectral",
+ "../code/stochastic_processes/karhunen_loeve",
+ "../code/stochastic_processes/spectral",
+ "../code/stochastic_processes/translation",
+ "../code/reliability/form",
+ "../code/reliability/sorm",
+ "../code/reliability/subset_simulation",
+ "../code/surrogates/srom",
+ "../code/surrogates/gpr",
+ "../code/surrogates/pce",
+ "../code/RunModel",
+ ], # path to your example scripts,
+ "gallery_dirs": [
+ "auto_examples/dimension_reduction/diffusion_maps",
+ "auto_examples/dimension_reduction/pod",
+ "auto_examples/dimension_reduction/grassmann",
+ "auto_examples/distributions/continuous_1d",
+ "auto_examples/distributions/discrete_1d",
+ "auto_examples/distributions/multivariate",
+ "auto_examples/distributions/user_defined",
+ "auto_examples/sampling/adaptive_kriging",
+ "auto_examples/sampling/importance_sampling",
+ "auto_examples/sampling/monte_carlo",
+ "auto_examples/sampling/latin_hypercube",
+ "auto_examples/sampling/mcmc",
+ "auto_examples/sampling/simplex",
+ "auto_examples/sampling/true_stratified_sampling",
+ "auto_examples/sampling/refined_stratified_sampling",
+ "auto_examples/inference/mle",
+ "auto_examples/inference/info_model_selection",
+ "auto_examples/inference/bayes_parameter_estimation",
+ "auto_examples/inference/bayes_model_selection",
+ "auto_examples/transformations/nataf",
+ "auto_examples/sensitivity/morris",
+ "auto_examples/sensitivity/sobol",
+ "auto_examples/stochastic_processes/bispectral",
+ "auto_examples/stochastic_processes/karhunen_loeve",
+ "auto_examples/stochastic_processes/spectral",
+ "auto_examples/stochastic_processes/translation",
+ "auto_examples/reliability/form",
+ "auto_examples/reliability/sorm",
+ "auto_examples/reliability/subset_simulation",
+ "auto_examples/surrogates/srom",
+ "auto_examples/surrogates/gpr",
+ "auto_examples/surrogates/pce",
+ "auto_examples/RunModel",
+ ], # path to where to save gallery generated output
+ "binder": {
# Required keys
- 'org': 'SURGroup',
- 'repo': 'UQpy',
- 'branch': 'master', # Can be any branch, tag, or commit hash. Use a branch that hosts your docs.
- 'binderhub_url': 'https://mybinder.org',
+ "org": "SURGroup",
+ "repo": "UQpy",
+ "branch": "master", # Can be any branch, tag, or commit hash. Use a branch that hosts your docs.
+ "binderhub_url": "https://mybinder.org",
# Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org).
- 'dependencies': './binder/requirements.txt',
- 'notebooks_dir': 'notebooks',
- 'use_jupyter_lab': True
+ "dependencies": "./binder/requirements.txt",
+ "notebooks_dir": "notebooks",
+ "use_jupyter_lab": True
# Jupyter notebooks for Binder will be copied to this directory (relative to built documentation root).
},
- 'ignore_pattern': '/local_',
+ "ignore_pattern": "/local_",
}
# -- Options for HTML output -------------------------------------------------
@@ -159,9 +165,9 @@
html_theme = "sphinx_rtd_theme"
html_theme_options = {
- 'logo_only': True,
- 'style_nav_header_background': '#F0F0F0',
- 'vcs_pageview_mode': 'view'
+ "logo_only": True,
+ "style_nav_header_background": "#F0F0F0",
+ "vcs_pageview_mode": "view",
}
github_url = "https://github.com/SURGroup/UQpy"
@@ -173,13 +179,18 @@
html_static_path = ["_static"]
html_sidebars = {
- "**": ["about.html", "navigation.html", "relations.html", "searchbox.html", ]
+ "**": [
+ "about.html",
+ "navigation.html",
+ "relations.html",
+ "searchbox.html",
+ ]
}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
-source_suffix = ['.rst', '.md']
+source_suffix = [".rst", ".md"]
# source_suffix = ".rst"
# The master toctree document.
From 5de396e2efbeffd97c0f000be6e126bca16fc6c7 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 14:22:13 +0200
Subject: [PATCH 06/59] Added documentation for Sobol indices
---
docs/code/sensitivity/sobol/README.rst | 24 ++++++++
docs/source/sensitivity/index.rst | 5 +-
docs/source/sensitivity/sobol.rst | 79 ++++++++++++++++++++++++++
3 files changed, 106 insertions(+), 2 deletions(-)
create mode 100644 docs/code/sensitivity/sobol/README.rst
create mode 100644 docs/source/sensitivity/sobol.rst
diff --git a/docs/code/sensitivity/sobol/README.rst b/docs/code/sensitivity/sobol/README.rst
new file mode 100644
index 000000000..1be801c21
--- /dev/null
+++ b/docs/code/sensitivity/sobol/README.rst
@@ -0,0 +1,24 @@
+Sobol Sensitivity indices
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+These examples serve as a guide for using the Sobol sensitivity module. They have been taken from various papers to enable validation of the implementation and have been referenced accordingly.
+
+Single output models
+======================
+We demonstrate the computation of the Sobol indices for models with a single output using the following examples:
+
+1. Ishigami function
+2. Exponential function
+3. Sobol function with parameters a := [0., 0.5, 3., 9., 99., 99.] : Example from [2] page 11
+
+Multiple output models
+========================
+
+We demonstrate the computation of the Sobol indices for models with multiple outputs using the following example:
+
+1. Mechanical oscillator ODE (numerical model): Example from [1] page 19
+
+
+[1] Gamboa F, Janon A, Klein T, Lagnoux A, others. Sensitivity analysis for multidimensional and functional outputs. Electronic journal of statistics 2014; 8(1): 575-603.
+
+[2] Saltelli, A. (2002). Making best use of model evaluations to compute indices.
diff --git a/docs/source/sensitivity/index.rst b/docs/source/sensitivity/index.rst
index 8fbf6b391..0e5fef5c0 100644
--- a/docs/source/sensitivity/index.rst
+++ b/docs/source/sensitivity/index.rst
@@ -5,10 +5,10 @@ This module contains functionality for all the sampling methods supported in :py
The module currently contains the following classes:
+- :py:class:`.Sobol`: Class to compute Sobol sensitivity indices.
- :py:class:`.MorrisSensitivity`: Class to perform Morris.
- :py:class:`.PceSensitivity`: Class to compute the sensitivity indices using the :class:`.PolynomialChaosExpansion` method.
-
Sensitivity analysis comprises techniques focused on determining how the variations of input variables :math:`X=\left[ X_{1}, X_{2},…,X_{d} \right]` of a mathematical model influence the response value :math:`Y=h(X)`.
@@ -18,4 +18,5 @@ Sensitivity analysis comprises techniques focused on determining how the variati
:caption: Sensitivity
Morris Sensitivity
- Polynomial Chaos Sensitivity
\ No newline at end of file
+ Polynomial Chaos Sensitivity
+ Sobol Sensitivity
diff --git a/docs/source/sensitivity/sobol.rst b/docs/source/sensitivity/sobol.rst
new file mode 100644
index 000000000..1966330c1
--- /dev/null
+++ b/docs/source/sensitivity/sobol.rst
@@ -0,0 +1,79 @@
+
+Sobol indices
+----------------------------------------
+
+Sobol indices are the standard approach for global, variance-based sensitivity analysis.
+The indices are based on a variance decomposition of the model output. This decomposition allows us to attribute the variance of the model output to the individual uncertain inputs and their interactions.
+
+There are three main groups of indices:
+
+- First order indices (:math:`S_{i}`): Describe the fraction of the output variance due to a single uncertain input parameter. This amount of variance can be reduced if the uncertainty in the corresponding input is eliminated.
+
+- Higher order indices: Describe the fraction of the output variance due to interactions between uncertain input parameters. For example, the second order indices (:math:`S_{ij}`) describe the fraction of the output variance due to interactions between two uncertain input parameters :math:`i` and :math:`j`.
+
+- Total order indices (:math:`S_{T_{i}}`): Describe the fraction of the output variance due to a single input parameter together with all higher order effects in which the input parameter is involved.
+
+If the first order index of an input parameter is equal to its total order index, the parameter is not involved in any interaction effects.
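+
+For a model with independent inputs, the variance decomposition underlying these indices can be written as
+
+.. math::
+    \mathbb{V}(Y) = \sum_{i} V_{i} + \sum_{i<j} V_{ij} + \ldots + V_{12 \ldots d}, \qquad S_{i} = \frac{V_{i}}{\mathbb{V}(Y)}, \quad S_{ij} = \frac{V_{ij}}{\mathbb{V}(Y)},
+
+where :math:`V_{i} = \mathbb{V}\left[E\left(Y \mid X_{i}\right)\right]` and :math:`V_{ij}` is the pure interaction contribution of inputs :math:`i` and :math:`j`.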
+
+The Sobol indices are computed using the Pick-and-Freeze approach for single output and multi-output models. Since there are several variants of the Pick-and-Freeze approach, the schemes implemented to compute Sobol indices are listed below:
+
+(where :math:`N` is the number of Monte Carlo samples and :math:`m` is the number of input parameters in the model)
+
+1. **First order indices** (:math:`S_{i}`)
+
+- Janon2014: Requires :math:`N(m + 1)` model evaluations
+
+.. math::
+ \frac{\mathbb{V}\left[E\left(Y \mid X_{i}\right)\right]}{\mathbb{V}(Y)} = \frac{\operatorname{Cov}\left(Y, Y_{C_{i}}\right)}{\mathbb{V}(Y)} = \frac{ (1 / N) Y_{A} \cdot Y_{C_{i}}-f_{0}^{2}}{ (1 / N)\frac{Y_{A} \cdot Y_{A} + Y_{C_{i}} \cdot Y_{C_{i}}}{2}-f_{0}^{2}}
+
+.. math::
+ y_{A}=f(A), \quad y_{C_{i}}=f(C_{i}), \quad f_{0}^{2}=\left(\frac{1}{2N} \sum_{j=1}^{N} y_{A}^{(j)} + y_{C_{i}}^{(j)} \right)^{2}
+
+Compared to "Sobol1993", the "Janon2014" estimator makes more efficient use of model evaluations and produces smaller confidence intervals (see the sketch after this list).
+
+- Sobol1993: Requires :math:`N(m + 1)` model evaluations [2]_.
+
+.. math::
+ S_{i} = \frac{\mathbb{V}\left[E\left(Y \mid X_{i}\right)\right]}{\mathbb{V}(Y)} = \frac{ (1/N) Y_{A} \cdot Y_{C_{i}}-f_{0}^{2}}{(1 / N) Y_{A} \cdot Y_{A}-f_{0}^{2}}
+
+.. math::
+ y_{A}=f(A), \quad y_{C_{i}}=f(C_{i}), \quad f_{0}^{2}=\left(\frac{1}{N} \sum_{j=1}^{N} y_{A}^{(j)} \right)^{2}
+
+- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [4]_.
+
+2. **Second order indices** (:math:`S_{ij}`)
+
+- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [4]_.
+
+3. **Total order indices** (:math:`S_{T_{i}}`)
+
+- Homma1996: Requires :math:`N(m + 1)` model evaluations [2]_.
+
+.. math::
+ S_{T_{i}} = 1 - \frac{\mathbb{V}\left[E\left(Y \mid \mathbf{X}_{\sim_{i}}\right)\right]}{\mathbb{V}(Y)} = 1 - \frac{ (1 / N) Y_{B} \cdot Y_{C_{i}}-f_{0}^{2}}{(1 / N) Y_{A} \cdot Y_{A}-f_{0}^{2}}
+
+.. math::
+ y_{A}=f(A), \quad y_{B}=f(B), \quad y_{C_{i}}=f(C_{i}), \quad f_{0}^{2}=\left(\frac{1}{2N} \sum_{j=1}^{N} y_{A}^{(j)} + y_{B}^{(j)} \right)^{2}
+
+- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [4]_.
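+
+As a minimal illustrative sketch (not the exact UQpy implementation; the function names below are purely illustrative), the "Janon2014", "Sobol1993" and "Homma1996" estimators can be written directly in NumPy once the model evaluations :math:`y_A = f(A)`, :math:`y_B = f(B)` and :math:`y_{C_i} = f(C_i)` on the pick-and-freeze sample sets are available as 1D arrays::
+
+    import numpy as np
+
+    def janon_2014(y_A, y_Ci):
+        # First order index S_i (Janon2014)
+        N = y_A.size
+        f0_sq = (0.5 * (y_A + y_Ci)).mean() ** 2
+        numerator = y_A @ y_Ci / N - f0_sq
+        denominator = 0.5 * (y_A @ y_A + y_Ci @ y_Ci) / N - f0_sq
+        return numerator / denominator
+
+    def sobol_1993(y_A, y_Ci):
+        # First order index S_i (Sobol1993)
+        N = y_A.size
+        f0_sq = y_A.mean() ** 2
+        return (y_A @ y_Ci / N - f0_sq) / (y_A @ y_A / N - f0_sq)
+
+    def homma_1996(y_A, y_B, y_Ci):
+        # Total order index S_Ti (Homma1996)
+        N = y_A.size
+        f0_sq = (0.5 * (y_A + y_B)).mean() ** 2
+        return 1.0 - (y_B @ y_Ci / N - f0_sq) / (y_A @ y_A / N - f0_sq)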
+
+
+Sobol Class
+^^^^^^^^^^^^^^^^^^
+
+The :class:`Sobol` class is imported using the following command:
+
+>>> from UQpy.sensitivity.sobol import Sobol
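+
+A minimal usage sketch (``runmodel_object`` and ``dist_object`` are placeholders for a :class:`.RunModel` object and an input distribution object, created as in the examples below):
+
+>>> SA = Sobol(runmodel_object, dist_object)
+>>> computed_indices = SA.run(n_samples=50_000)
+>>> computed_indices["sobol_i"]        # first order indices
+>>> computed_indices["sobol_total_i"]  # total order indices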
+
+Methods
+"""""""
+
+.. autoclass:: UQpy.sensitivity.Sobol
+ :members: run
+
+Examples
+""""""""""
+
+.. toctree::
+
+ Sobol Examples <../auto_examples/sensitivity/sobol/index>
From c38b2e6f20013054375cf02568dc7f11446b3c42 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 14:22:39 +0200
Subject: [PATCH 07/59] Added examples for Sobol indices
---
.../sensitivity/sobol/local_exponential.py | 20 +++
docs/code/sensitivity/sobol/local_ishigami.py | 23 ++++
.../sobol/local_mechanical_oscillator_ODE.py | 60 +++++++++
.../sensitivity/sobol/local_sobol_func.py | 42 ++++++
.../sobol/plot_mechanical_oscillator_ODE.py | 92 +++++++++++++
.../sobol/plot_sobol_exponential.py | 60 +++++++++
.../code/sensitivity/sobol/plot_sobol_func.py | 124 ++++++++++++++++++
.../sensitivity/sobol/plot_sobol_ishigami.py | 102 ++++++++++++++
8 files changed, 523 insertions(+)
create mode 100644 docs/code/sensitivity/sobol/local_exponential.py
create mode 100644 docs/code/sensitivity/sobol/local_ishigami.py
create mode 100644 docs/code/sensitivity/sobol/local_mechanical_oscillator_ODE.py
create mode 100644 docs/code/sensitivity/sobol/local_sobol_func.py
create mode 100644 docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py
create mode 100644 docs/code/sensitivity/sobol/plot_sobol_exponential.py
create mode 100644 docs/code/sensitivity/sobol/plot_sobol_func.py
create mode 100644 docs/code/sensitivity/sobol/plot_sobol_ishigami.py
diff --git a/docs/code/sensitivity/sobol/local_exponential.py b/docs/code/sensitivity/sobol/local_exponential.py
new file mode 100644
index 000000000..1fd0ef0d9
--- /dev/null
+++ b/docs/code/sensitivity/sobol/local_exponential.py
@@ -0,0 +1,20 @@
+"""
+
+Auxiliary file
+==============================================
+
+"""
+
+import numpy as np
+
+
+def evaluate(X: np.array) -> np.array:
+    r"""A non-linear function that is used to demonstrate the sensitivity indices.
+
+ .. math::
+ f(x) = \exp(x_1 + 2*x_2)
+ """
+
+ Y = np.exp(X[:, 0] + 2 * X[:, 1])
+
+ return Y
diff --git a/docs/code/sensitivity/sobol/local_ishigami.py b/docs/code/sensitivity/sobol/local_ishigami.py
new file mode 100644
index 000000000..e5af649fe
--- /dev/null
+++ b/docs/code/sensitivity/sobol/local_ishigami.py
@@ -0,0 +1,23 @@
+"""
+
+Auxiliary file
+==============================================
+
+"""
+
+import numpy as np
+
+
+def evaluate(X, params=[7, 0.1]):
+ """Non-monotonic Ishigami-Homma three parameter test function"""
+
+ a = params[0]
+ b = params[1]
+
+ Y = (
+ np.sin(X[:, 0])
+ + a * np.power(np.sin(X[:, 1]), 2)
+ + b * np.power(X[:, 2], 4) * np.sin(X[:, 0])
+ )
+
+ return Y
diff --git a/docs/code/sensitivity/sobol/local_mechanical_oscillator_ODE.py b/docs/code/sensitivity/sobol/local_mechanical_oscillator_ODE.py
new file mode 100644
index 000000000..13b28c9fa
--- /dev/null
+++ b/docs/code/sensitivity/sobol/local_mechanical_oscillator_ODE.py
@@ -0,0 +1,60 @@
+"""
+
+Auxiliary file
+==============================================
+
+"""
+
+import numpy as np
+from scipy.integrate import solve_ivp
+
+
+def mech_oscillator(input_parameters):
+ """
+ We have the second order differential equation:
+
+ .. math::
+
+ m \ddot{x} + c \dot{x} + k x = 0
+
+    with initial conditions: :math:`x(0) = \ell`, :math:`\dot{x}(0) = 0`,
+
+    where, for example, :math:`m \sim \mathcal{U}(10, 12)`,
+    :math:`c \sim \mathcal{U}(0.4, 0.8)`,
+    :math:`k \sim \mathcal{U}(70, 90)`,
+    :math:`\ell \sim \mathcal{U}(-1, -0.25)`.
+
+
+ References
+ ----------
+
+ .. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others .
+ Sensitivity analysis for multidimensional and functional outputs.
+ Electronic journal of statistics 2014; 8(1): 575-603.
+
+ """
+
+ # unpack the input parameters
+ m, c, k, l = input_parameters[0]
+
+    # initial conditions
+ x_0 = l
+ v_0 = 0
+
+ # time points
+ t_0 = 0
+ t_f = 40
+ dt = 0.05
+ n_t = int((t_f - t_0) / dt)
+ T = np.linspace(t_0, t_f, n_t)
+
+ def ODE(t, y):
+ """
+ The ODE system.
+ """
+ return np.array([y[1], -(k / m) * y[0] - (c / m) * y[1]])
+
+ # solve the ODE
+ sol = solve_ivp(ODE, [t_0, t_f], [x_0, v_0], method="RK45", t_eval=T)
+
+ return sol.y[0]
diff --git a/docs/code/sensitivity/sobol/local_sobol_func.py b/docs/code/sensitivity/sobol/local_sobol_func.py
new file mode 100644
index 000000000..1ccabc6dd
--- /dev/null
+++ b/docs/code/sensitivity/sobol/local_sobol_func.py
@@ -0,0 +1,42 @@
+"""
+
+Auxiliary file
+==============================================
+
+"""
+
+import numpy as np
+import copy
+
+
+def evaluate(X, a_values):
+
+ dims = len(a_values)
+ g = 1
+
+ for i in range(dims):
+ g_i = (np.abs(4 * X[:, i] - 2) + a_values[i]) / (1 + a_values[i])
+ g *= g_i
+
+ return g
+
+
+def sensitivities(a_values):
+
+ dims = len(a_values)
+
+ Total_order = np.zeros((dims, 1))
+
+ V_i = (3 * (1 + a_values) ** 2) ** (-1)
+
+ total_variance = np.prod(1 + V_i) - 1
+
+ First_order = V_i / total_variance
+
+ for i in range(dims):
+
+ rem_First_order = copy.deepcopy(V_i)
+ rem_First_order[i] = 0
+ Total_order[i] = V_i[i] * np.prod(rem_First_order + 1) / total_variance
+
+ return First_order.reshape(-1, 1), Total_order
diff --git a/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py b/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py
new file mode 100644
index 000000000..6e03332d0
--- /dev/null
+++ b/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py
@@ -0,0 +1,92 @@
+r"""
+
+Mechanical oscillator model (multioutput)
+==============================================
+
+The mechanical oscillator is governed by the following second-order ODE:
+
+.. math::
+ m \ddot{x} + c \dot{x} + k x = 0
+
+.. math::
+ x(0) = \ell, \dot{x}(0) = 0.
+
+The parameters of the oscillator are modeled as follows:
+
+.. math::
+ m \sim \mathcal{U}(10, 12), c \sim \mathcal{U}(0.4, 0.8), k \sim \mathcal{U}(70, 90), \ell \sim \mathcal{U}(-1, -0.25).
+
+"""
+
+# %%
+import numpy as np
+import matplotlib.pyplot as plt
+
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Uniform
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.sobol import Sobol
+
+# %%
+# Create Model object
+model = PythonModel(
+ model_script="local_mechanical_oscillator_ODE.py",
+ model_object_name="mech_oscillator",
+ var_names=[r"$m$", "$c$", "$k$", "$\ell$"],
+ delete_files=True,
+)
+
+runmodel_obj = RunModel(model=model)
+
+# Define distribution object
+M = Uniform(10, (12 - 10))
+C = Uniform(0.4, (0.8 - 0.4))
+K = Uniform(70, (90 - 70))
+L = Uniform(-1, (-0.25 - -1))
+dist_object = JointIndependent([M, C, K, L])
+
+# %%
+SA = Sobol(runmodel_obj, dist_object)
+
+computed_indices = SA.run(n_samples=500)
+
+# %%
+# Plot the Sobol indices
+t_0 = 0
+t_f = 40
+dt = 0.05
+n_t = int((t_f - t_0) / dt)
+T = np.linspace(t_0, t_f, n_t)
+
+fig, ax = plt.subplots(1, 2, figsize=(16, 8))
+
+ax[0].plot(T, computed_indices["sobol_total_i"][0, :], "r", label=r"$m$")
+ax[0].plot(T, computed_indices["sobol_total_i"][1, :], "g", label=r"$c$")
+ax[0].plot(T, computed_indices["sobol_total_i"][2, :], label=r"$k$", color="royalblue")
+ax[0].plot(
+ T, computed_indices["sobol_total_i"][3, :], label=r"$\ell$", color="aquamarine"
+)
+
+ax[0].set_title("Total order Sobol indices", fontsize=16)
+ax[0].set_xlabel("time (s)", fontsize=16)
+ax[0].set_ylabel(r"$S_{T_i}$", fontsize=16)
+ax[0].set_xbound(0, t_f)
+ax[0].set_ybound(-0.2, 1.2)
+ax[0].legend()
+
+ax[1].plot(T, computed_indices["sobol_i"][0, :], "r", label=r"$m$")
+ax[1].plot(T, computed_indices["sobol_i"][1, :], "g", label=r"$c$")
+ax[1].plot(T, computed_indices["sobol_i"][2, :], label=r"$k$", color="royalblue")
+ax[1].plot(T, computed_indices["sobol_i"][3, :], label=r"$\ell$", color="aquamarine")
+
+ax[1].set_title("First order Sobol indices", fontsize=16)
+ax[1].set_xlabel("time (s)", fontsize=16)
+ax[1].set_ylabel(r"$S_i$", fontsize=16)
+ax[1].set_xbound(0, t_f)
+ax[1].set_ybound(-0.2, 1.2)
+ax[1].legend(fontsize=12)
+
+fig.suptitle("Pointwise-in-time Sobol indices", fontsize=20)
+
+plt.show()
diff --git a/docs/code/sensitivity/sobol/plot_sobol_exponential.py b/docs/code/sensitivity/sobol/plot_sobol_exponential.py
new file mode 100644
index 000000000..81c9b78e9
--- /dev/null
+++ b/docs/code/sensitivity/sobol/plot_sobol_exponential.py
@@ -0,0 +1,60 @@
+"""
+
+Exponential function
+==============================================
+
+.. math::
+ f(x) := \exp(x_1 + 2x_2), \quad x_1, x_2 \sim \mathcal{N}(0, 1)
+
+"""
+
+# %%
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Normal
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.sobol import Sobol
+
+# %%
+# Create Model object
+model = PythonModel(
+ model_script="local_exponential.py",
+ model_object_name="evaluate",
+ var_names=[
+ "X_1",
+ "X_2",
+ ],
+ delete_files=True,
+)
+
+runmodel_obj = RunModel(model=model)
+
+# Define distribution object
+dist_object = JointIndependent([Normal(0, 1)] * 2)
+
+# %% [markdown]
+# Compute Sobol indices
+
+# %%
+SA = Sobol(runmodel_obj, dist_object)
+
+# Compute Sobol indices using the pick and freeze algorithm
+computed_indices = SA.run(
+ n_samples=100_000, num_bootstrap_samples=1_000, confidence_level=0.95
+)
+
+# %% [markdown]
+# Expected first order Sobol indices (computed analytically):
+#
+# X1: 0.0118
+#
+# X2: 0.3738
+
+# %%
+computed_indices["sobol_i"]
+
+# %% [markdown]
+# Confidence intervals for first order Sobol indices
+
+# %%
+computed_indices["CI_sobol_i"]
diff --git a/docs/code/sensitivity/sobol/plot_sobol_func.py b/docs/code/sensitivity/sobol/plot_sobol_func.py
new file mode 100644
index 000000000..0f7f7ed0d
--- /dev/null
+++ b/docs/code/sensitivity/sobol/plot_sobol_func.py
@@ -0,0 +1,124 @@
+r"""
+
+Sobol function
+==============================================
+
+.. math::
+
+ g(x_1, x_2, \ldots, x_D) := \prod_{i=1}^{D} \frac{|4x_i - 2| + a_i}{1 + a_i},
+
+where,
+
+.. math::
+ x_i \sim \mathcal{U}(0, 1), \quad a_i \in \mathbb{R}.
+
+"""
+
+# %%
+import numpy as np
+
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Uniform
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.sobol import Sobol
+
+# %%
+# Create Model object
+num_vars = 6
+a_vals = np.array([0.0, 0.5, 3.0, 9.0, 99.0, 99.0])
+
+model = PythonModel(
+ model_script="local_sobol_func.py",
+ model_object_name="evaluate",
+ var_names=["X_" + str(i) for i in range(num_vars)],
+ delete_files=True,
+ a_values=a_vals,
+)
+
+runmodel_obj = RunModel(model=model)
+
+# Define distribution object
+dist_object = JointIndependent([Uniform(0, 1)] * num_vars)
+
+# %% [markdown]
+# #### Compute Sobol indices
+
+# %%
+SA = Sobol(runmodel_obj, dist_object)
+
+# Compute Sobol indices using the pick and freeze algorithm
+computed_indices = SA.run(n_samples=50_000, estimate_second_order=True)
+
+# %% [markdown]
+# First order Sobol indices
+#
+# $S_1$ = 5.86781190e-01
+#
+# $S_2$ = 2.60791640e-01
+#
+# $S_3$ = 3.66738244e-02
+#
+# $S_4$ = 5.86781190e-03
+#
+# $S_5$ = 5.86781190e-05
+#
+# $S_6$ = 5.86781190e-05
+
+# %%
+computed_indices["sobol_i"]
+
+# %% [markdown]
+# Total order Sobol indices
+#
+# $S_{T_1}$ = 6.90085892e-01
+#
+# $S_{T_2}$ = 3.56173364e-01
+#
+# $S_{T_3}$ = 5.63335422e-02
+#
+# $S_{T_4}$ = 9.17057664e-03
+#
+# $S_{T_5}$ = 9.20083854e-05
+#
+# $S_{T_6}$ = 9.20083854e-05
+#
+
+# %%
+computed_indices["sobol_total_i"]
+
+# %% [markdown]
+# Second-order Sobol indices
+#
+# $S_{12}$ = 0.0869305
+#
+# $S_{13}$ = 0.0122246
+#
+# $S_{14}$ = 0.00195594
+#
+# $S_{15}$ = 0.00001956
+#
+# $S_{16}$ = 0.00001956
+#
+# $S_{23}$ = 0.00543316
+#
+# $S_{24}$ = 0.00086931
+#
+# $S_{25}$ = 0.00000869
+#
+# $S_{26}$ = 0.00000869
+#
+# $S_{34}$ = 0.00012225
+#
+# $S_{35}$ = 0.00000122
+#
+# $S_{36}$ = 0.00000122
+#
+# $S_{45}$ = 0.00000020
+#
+# $S_{46}$ = 0.00000020
+#
+# $S_{56}$ = 2.0e-9
+
+# %%
+computed_indices["sobol_ij"]
diff --git a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py
new file mode 100644
index 000000000..a448a61b4
--- /dev/null
+++ b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py
@@ -0,0 +1,102 @@
+r"""
+
+Ishigami function
+==============================================
+
+.. math::
+    f(x_1, x_2, x_3) = \sin(x_1) + a \cdot \sin^2(x_2) + b \cdot x_3^4 \sin(x_1)
+
+.. math::
+ x_1, x_2, x_3 \sim \mathcal{U}(-\pi, \pi), \quad a, b\in \mathbb{R}
+
+First order Sobol indices
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. math::
+ S_1 = \frac{V_1}{\mathbb{V}[Y]}, \quad S_2 = \frac{V_2}{\mathbb{V}[Y]}, \quad S_3 = \frac{V_3}{\mathbb{V}[Y]} = 0,
+
+.. math::
+ V_1 = 0.5 (1 + \frac{b\pi^4}{5})^2, \quad V_2 = \frac{a^2}{8}, \quad V_3 = 0
+
+.. math::
+ \mathbb{V}[Y] = \frac{a^2}{8} + \frac{b\pi^4}{5} + \frac{b^2\pi^8}{18} + \frac{1}{2}
+
+Total order Sobol indices
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. math::
+ S_{T_1} = \frac{V_{T1}}{\mathbb{V}[Y]}, \quad S_{T_2} = \frac{V_{T2}}{\mathbb{V}[Y]}, \quad S_{T_3} = \frac{V_{T3}}{\mathbb{V}[Y]}
+
+.. math::
+ V_{T1} = 0.5 (1 + \frac{b\pi^4}{5})^2 + \frac{8b^2\pi^8}{225}, \quad V_{T2}= \frac{a^2}{8}, \quad V_{T3} = \frac{8b^2\pi^8}{225}
+
+.. math::
+ \mathbb{V}[Y] = \frac{a^2}{8} + \frac{b\pi^4}{5} + \frac{b^2\pi^8}{18} + \frac{1}{2}
+
+"""
+
+# %%
+import numpy as np
+
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Uniform
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.sobol import Sobol
+
+# %%
+# Create Model object
+model = PythonModel(
+ model_script="local_ishigami.py",
+ model_object_name="evaluate",
+ var_names=[r"$X_1$", "$X_2$", "$X_3$"],
+ delete_files=True,
+ params=[7, 0.1],
+)
+
+runmodel_obj = RunModel(model=model)
+
+# Define distribution object
+dist_object = JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3)
+
+# %%
+SA = Sobol(runmodel_obj, dist_object)
+
+computed_indices = SA.run(n_samples=100_000, num_bootstrap_samples=100)
+
+# %% [markdown]
+# Expected first order Sobol indices:
+#
+# X1: 0.3139
+#
+# X2: 0.4424
+#
+# X3: 0.0
+#
+
+# %%
+computed_indices["sobol_i"]
+
+# %% [markdown]
+# Expected total order Sobol indices:
+#
+# X1: 0.55758886
+#
+# X2: 0.44241114
+#
+# X3: 0.24368366
+
+# %%
+computed_indices["sobol_total_i"]
+
+# %% [markdown]
+# Confidence intervals for first order Sobol indices
+
+# %%
+computed_indices["CI_sobol_i"]
+
+# %% [markdown]
+# Confidence intervals for total order Sobol indices
+
+# %%
+computed_indices["CI_sobol_total_i"]
From 068fde50f05f5084cfee6a33bab741f175637464 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 20:37:09 +0200
Subject: [PATCH 08/59] =?UTF-8?q?Added=20Cram=C3=A9r-von=20Mises=20sensiti?=
=?UTF-8?q?vity=20index?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/UQpy/sensitivity/__init__.py | 2 +
src/UQpy/sensitivity/cramer_von_mises.py | 340 +++++++++++++++++++++++
2 files changed, 342 insertions(+)
create mode 100644 src/UQpy/sensitivity/cramer_von_mises.py
diff --git a/src/UQpy/sensitivity/__init__.py b/src/UQpy/sensitivity/__init__.py
index 5a5b0d997..10ca1565d 100644
--- a/src/UQpy/sensitivity/__init__.py
+++ b/src/UQpy/sensitivity/__init__.py
@@ -1,7 +1,9 @@
from UQpy.sensitivity.MorrisSensitivity import MorrisSensitivity
from UQpy.sensitivity.PceSensitivity import PceSensitivity
from UQpy.sensitivity.sobol import Sobol
+from UQpy.sensitivity.cramer_von_mises import CramervonMises
from . import MorrisSensitivity
from . import PceSensitivity
from . import Sobol
+from . import CramervonMises
diff --git a/src/UQpy/sensitivity/cramer_von_mises.py b/src/UQpy/sensitivity/cramer_von_mises.py
new file mode 100644
index 000000000..9cb2fdfe6
--- /dev/null
+++ b/src/UQpy/sensitivity/cramer_von_mises.py
@@ -0,0 +1,340 @@
+"""
+Computing the Cramér-von Mises sensitivity indices.
+
+References
+----------
+
+.. [1] Gamboa, F., Klein, T., & Lagnoux, A. (2018). Sensitivity Analysis
+ Based on Cramér-von Mises Distance. SIAM/ASA Journal on Uncertainty
+ Quantification, 6(2), 522-548. doi:10.1137/15M1025621
+
+.. [2] Gamboa, F., Gremaud, P., Klein, T., & Lagnoux, A. (2020). Global
+ Sensitivity Analysis: a new generation of mighty estimators based on
+ rank statistics. arXiv [math.ST]. http://arxiv.org/abs/2003.01772
+
+"""
+
+import logging
+
+import numpy as np
+
+from UQpy.sensitivity.baseclass.sensitivity import Sensitivity
+from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples
+from UQpy.sensitivity.sobol import compute_first_order as compute_first_order_sobol
+from UQpy.sensitivity.sobol import compute_total_order as compute_total_order_sobol
+from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter
+
+# TODO: Sampling strategies
+
+
+class CramervonMises(Sensitivity):
+ """
+ Compute the Cramér-von Mises indices.
+
+ Currently only available for models with scalar output.
+
+    :param runmodel_object: The computational model. It should be of type :class:`.RunModel`. \
+        Since the Cramér-von Mises indices are currently only available for models with \
+        scalar output, the output QoI is expected to be a scalar.
+
+    :param dist_object: List of :class:`.Distribution` objects corresponding to each \
+        random variable, or a :class:`.JointIndependent` object \
+        (multivariate RV with independent marginals).
+
+ **Methods:**
+ """
+
+ def __init__(
+ self, runmodel_object, dist_object, random_state=None, **kwargs
+ ) -> None:
+
+ super().__init__(
+ runmodel_object, dist_object, random_state=random_state, **kwargs
+ )
+
+ # Create logger with the same name as the class
+ self.logger = logging.getLogger(__name__)
+ self.logger.setLevel(logging.ERROR)
+ frmt = UQpyLoggingFormatter()
+
+ # create console handler with a higher log level
+ ch = logging.StreamHandler()
+ ch.setFormatter(frmt)
+
+ # add the handler to the logger
+ self.logger.addHandler(ch)
+
+ self.CVM_i = None
+ "First order Cramér-von Mises indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`"
+
+ self.CI_CVM_i = None
+ "Confidence intervals of the first order Cramér-von Mises indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 2)`"
+
+ self.sobol_i = None
+ "First order Sobol indices computed using the pick-and-freeze samples, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`"
+
+ self.sobol_total_i = None
+ "Total order Sobol indices computed using the pick-and-freeze samples, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`"
+
+ self.n_samples = None
+ "Number of samples used to compute the Cramér-von Mises indices, :class:`int`"
+
+ self.num_vars = None
+ "Number of random variables, :class:`int`"
+
+ def run(
+ self,
+ n_samples=1_000,
+ estimate_sobol_indices=False,
+ num_bootstrap_samples=None,
+ confidence_level=0.95,
+ disable_CVM_indices=False,
+ ):
+
+ """
+ Compute the Cramér-von Mises indices.
+
+    :param n_samples: Number of samples used to compute the Cramér-von Mises indices. \
+        Must be an integer; the default is 1,000.
+
+ :param estimate_sobol_indices: If :code:`True`, the Sobol indices are estimated \
+ using the pick-and-freeze samples.
+
+    :param num_bootstrap_samples: Number of bootstrap samples used to estimate the \
+        confidence intervals of the Cramér-von Mises indices. \
+        If :code:`None`, confidence intervals are not computed.
+
+ :param confidence_level: Confidence level used to compute the confidence \
+ intervals of the Cramér-von Mises indices.
+
+ :param disable_CVM_indices: If :code:`True`, the Cramér-von Mises indices \
+ are not computed.
+
+ :return: A :class:`dict` with the following keys: \
+ :code:`CVM_i` of shape :code:`(num_vars, 1)`, \
+ :code:`CI_CVM_i` of shape :code:`(num_vars, 2)`, \
+ :code:`sobol_i` of shape :code:`(num_vars, 1)`, \
+ :code:`sobol_total_i` of shape :code:`(num_vars, 1)`.
+
+ """
+
+ # Check nsamples
+ self.n_samples = n_samples
+ if not isinstance(self.n_samples, int):
+ raise TypeError("UQpy: nsamples should be an integer")
+
+ # Check num_bootstrap_samples data type
+ if num_bootstrap_samples is not None:
+ if not isinstance(num_bootstrap_samples, int):
+ raise TypeError("UQpy: num_bootstrap_samples should be an integer.\n")
+ elif num_bootstrap_samples is None:
+ self.logger.info(
+ "UQpy: num_bootstrap_samples is set to None, confidence intervals will not be computed.\n"
+ )
+
+ ################## GENERATE SAMPLES ##################
+
+ A_samples, W_samples, C_i_generator, _ = generate_pick_freeze_samples(
+ self.dist_object, self.n_samples, self.random_state
+ )
+
+ self.logger.info("UQpy: Generated samples using the pick-freeze scheme.\n")
+
+ ################# MODEL EVALUATIONS ####################
+
+ A_model_evals = self._run_model(A_samples).reshape(-1, 1)
+
+ self.logger.info("UQpy: Model evaluations A completed.\n")
+
+ W_model_evals = self._run_model(W_samples).reshape(-1, 1)
+
+ self.logger.info("UQpy: Model evaluations W completed.\n")
+
+ self.num_vars = A_samples.shape[1]
+
+ C_i_model_evals = np.zeros((self.n_samples, self.num_vars))
+
+ for i, C_i in enumerate(C_i_generator):
+ C_i_model_evals[:, i] = self._run_model(C_i).ravel()
+
+ self.logger.info("UQpy: Model evaluations C completed.\n")
+
+ self.logger.info("UQpy: All model evaluations computed successfully.\n")
+
+ ######################### STORAGE ########################
+
+ # Create dictionary to store the sensitivity indices
+ computed_indices = {}
+
+ ################## COMPUTE CVM INDICES ##################
+
+ # flag is used to disable computation of
+ # CVM indices during testing
+ if not disable_CVM_indices:
+ # Compute the Cramér-von Mises indices
+ self.CVM_i = self.pick_and_freeze_estimator(
+ A_model_evals, W_model_evals, C_i_model_evals
+ )
+
+ self.logger.info("UQpy: Cramér-von Mises indices computed successfully.\n")
+
+ # Store the indices in the dictionary
+ computed_indices["CVM_i"] = self.CVM_i
+
+ ################# COMPUTE CONFIDENCE INTERVALS ##################
+
+ if num_bootstrap_samples is not None:
+
+ self.logger.info("UQpy: Computing confidence intervals ...\n")
+
+ estimator_inputs = [
+ A_model_evals,
+ W_model_evals,
+ C_i_model_evals,
+ ]
+
+ self.CI_CVM_i = self.bootstrapping(
+ self.pick_and_freeze_estimator,
+ estimator_inputs,
+ computed_indices["CVM_i"],
+ num_bootstrap_samples,
+ confidence_level,
+ )
+
+ self.logger.info(
+ "UQpy: Confidence intervals for Cramér-von Mises indices computed successfully.\n"
+ )
+
+ # Store the indices in the dictionary
+ computed_indices["CI_CVM_i"] = self.CI_CVM_i
+
+ ################## COMPUTE SOBOL INDICES ##################
+
+ if estimate_sobol_indices:
+
+ self.logger.info("UQpy: Computing First order Sobol indices ...\n")
+
+ # extract shape
+ _shape = C_i_model_evals.shape
+
+ # convert C_i_model_evals to 3D array
+ # with n_outputs=1 in first dimension
+ n_outputs = 1
+ C_i_model_evals = C_i_model_evals.reshape((n_outputs, *_shape))
+
+ self.sobol_i = compute_first_order_sobol(
+ A_model_evals, W_model_evals, C_i_model_evals
+ )
+
+ self.logger.info("UQpy: First order Sobol indices computed successfully.\n")
+
+ self.sobol_total_i = compute_total_order_sobol(
+ A_model_evals, W_model_evals, C_i_model_evals
+ )
+
+ self.logger.info("UQpy: Total order Sobol indices computed successfully.\n")
+
+ # Store the indices in the dictionary
+ computed_indices["sobol_i"] = self.sobol_i
+ computed_indices["sobol_total_i"] = self.sobol_total_i
+
+ return computed_indices
+
+ @staticmethod
+ def indicator_function(Y, W):
+ """
+ Vectorized version of the indicator function.
+
+ .. math::
+ \mathbb{I}(Y,W) = \mathbf{1}_{Y \leq W}
+
+ **Inputs:**
+
+ * **Y** (`ndarray`):
+ Vector of values of the random variable.
+ Shape: `(N, 1)`
+
+ * **W** (`ndarray`):
+ Vector of values of the random variable.
+ Shape: `(N, 1)`
+
+ **Outputs:**
+
+ * **indicator** (`ndarray`):
+ Shape: `(N, 1)`
+
+ """
+ return (Y <= W.T).astype(int)
+
+ def pick_and_freeze_estimator(self, A_model_evals, W_model_evals, C_i_model_evals):
+
+ """
+ Compute the first order Cramér-von Mises indices
+ using the Pick-and-Freeze estimator.
+
+ **Inputs**
+
+ * **A_model_evals** (`np.array`):
+ Shape: `(n_samples, 1)`
+
+ * **W_model_evals** (`np.array`):
+ Shape: `(n_samples, 1)`
+
+ * **C_i_model_evals** (`np.array`):
+ Shape: `(n_samples, num_vars)`
+
+ **Outputs**
+
+ * **First_order_CVM** (`np.array`):
+ Shape: `(num_vars)`
+
+ """
+
+ ## **Notes**
+
+ # Implementation using 2 `for` loops. This is however
+ # faster than the vectorized version which has only 1 `for` loop.
+
+ # For N = 50_000 runs
+ # With 2 `for` loops: 26.75 seconds (this implementation)
+ # With 1 `for` loops: 62.42 seconds (vectorized implementation)
+
+ # Possible improvements:
+ # Check indicator function run time using a profiler
+ # as it results in an `N` x `N` array.
+ # Q. Does it use a for loop under the hood?
+ # Computations such as `np.sum` and `np.mean`
+ # are handled by numpy so they are fast.
+ # (This should however be faster for small `N`, e.g. N=10_000)
+
+ N = self.n_samples
+ m = self.num_vars
+
+ # Model evaluations
+ f_A = A_model_evals.ravel()
+ f_W = W_model_evals.ravel()
+ f_C_i = C_i_model_evals
+
+ # Store CramérvonMises indices
+ First_order_indices = np.zeros((m, 1))
+
+ # Compute Cramér-von Mises indices
+ for i in range(m):
+ sum_numerator = 0
+ sum_denominator = 0
+
+ for k in range(N):
+
+ term_1 = self.indicator_function(f_A, f_W[k])
+ term_2 = self.indicator_function(f_C_i[:, i], f_W[k])
+
+ mean_sum = (1 / (2 * N)) * np.sum(term_1 + term_2)
+ mean_product = (1 / N) * np.sum(term_1 * term_2)
+
+ sum_numerator += mean_product - mean_sum**2
+ sum_denominator += mean_sum - mean_sum**2
+
+ First_order_indices[i] = sum_numerator / sum_denominator
+
+ return First_order_indices
From f16288d6d537bb96cded08ae98f7a11d3ebb4021 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 20:39:36 +0200
Subject: [PATCH 09/59] =?UTF-8?q?Add=20unit=20tests=20for=20Cram=C3=A9r-vo?=
=?UTF-8?q?n=20Mises=20sensitivity?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
tests/unit_tests/sensitivity/exponential.py | 25 ++
.../sensitivity/test_cramer_von_mises.py | 338 ++++++++++++++++++
tests/unit_tests/sensitivity/test_sobol.py | 1 -
3 files changed, 363 insertions(+), 1 deletion(-)
create mode 100644 tests/unit_tests/sensitivity/exponential.py
create mode 100644 tests/unit_tests/sensitivity/test_cramer_von_mises.py
diff --git a/tests/unit_tests/sensitivity/exponential.py b/tests/unit_tests/sensitivity/exponential.py
new file mode 100644
index 000000000..dc8c90df3
--- /dev/null
+++ b/tests/unit_tests/sensitivity/exponential.py
@@ -0,0 +1,25 @@
+import numpy as np
+
+
+def evaluate(X: np.array) -> np.array:
+    r"""A non-linear function that is used to test the Cramér-von Mises sensitivity indices.
+
+ .. math::
+ f(x) = \exp(x_1 + 2*x_2)
+
+ Parameters
+ ----------
+ X : np.array
+ An `N*D` array holding values for each parameter, where `N` is the
+ number of samples and `D` is the number of parameters
+ (in this case, 2).
+
+ Returns
+ -------
+ np.array
+        The model output `Y`, an array of shape `(N, )`.
+ """
+
+ Y = np.exp(X[:, 0] + 2 * X[:, 1])
+
+ return Y
diff --git a/tests/unit_tests/sensitivity/test_cramer_von_mises.py b/tests/unit_tests/sensitivity/test_cramer_von_mises.py
new file mode 100644
index 000000000..46cebb429
--- /dev/null
+++ b/tests/unit_tests/sensitivity/test_cramer_von_mises.py
@@ -0,0 +1,338 @@
+"""
+This is the test module for the Cramér-von Mises sensitivity indices.
+
+Here, we use an exponential function to test the output.
+
+The following methods are tested:
+1. pick_and_freeze_estimator
+2. bootstrap_variance_computation
+
+Important
+----------
+The computed indices are compared using the `np.isclose` function.
+
+Function signature:
+ numpy.isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False)
+
+ Parameters:
+ a, b: array_like
+ Input arrays to compare.
+
+ rtol: float
+ The relative tolerance parameter.
+
+ atol: float
+ The absolute tolerance parameter.
+
+Each element of the `diff` array is compared as follows:
+diff = |a - b|
+diff <= atol + rtol * abs(b)
+
+- relative tolerance: rtol * abs(b)
+ It is the maximum allowed difference between a and b,
+ relative to the absolute value of b.
+    For example, to set a relative tolerance of 1%, pass rtol=0.01,
+    which ensures that the values agree to within 1% of abs(b).
+
+- absolute tolerance: atol
+ When b is close to zero, the atol value is used.
+
+"""
+
+import numpy as np
+import pytest
+import scipy
+
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Normal, Uniform
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.cramer_von_mises import CramervonMises
+
+# Prepare
+###############################################################################
+
+# Prepare the input distribution
+@pytest.fixture()
+def exponential_input_dist_object():
+ """
+    This function returns the input distribution for the exponential function.
+
+ X1 ~ Normal(0,1)
+ X2 ~ Normal(0,1)
+
+ """
+ return JointIndependent([Normal(0, 1)] * 2)
+
+
+@pytest.fixture()
+def exponential_model_object():
+ """This function creates the exponential run_model_object"""
+ model = PythonModel(
+ model_script="exponential.py",
+ model_object_name="evaluate",
+ var_names=[r"$X_1$", "$X_2$"],
+ delete_files=True,
+ )
+
+ runmodel_obj = RunModel(model=model)
+
+ return runmodel_obj
+
+
+@pytest.fixture()
+def CVM_object(exponential_model_object, exponential_input_dist_object):
+ """This function returns the CVM object."""
+
+ return CramervonMises(exponential_model_object, exponential_input_dist_object)
+
+
+@pytest.fixture()
+def analytical_exponential_CVM_indices():
+ """This function returns the analytical Cramer-von-Mises indices.
+
+ S1_CVM = (6/np.pi) * np.arctan(2) - 2
+ S2_CVM = (6/np.pi) * np.arctan(np.sqrt(19)) - 2
+
+ print(np.around(S1_CVM, 4))
+ print(np.around(S2_CVM, 4))
+
+ """
+
+ return np.array([[0.1145], [0.5693]])
+
+
+@pytest.fixture()
+def numerical_exponential_CVM_indices(CVM_object):
+ """
+ This function returns the Cramer-von-Mises indices
+ computed using the Pick and Freeze algorithm.
+
+ """
+
+ SA = CVM_object
+
+ np.random.seed(12345) #! set seed for reproducibility
+
+ computed_indices = SA.run(n_samples=50_000)
+
+ return computed_indices["CVM_i"]
+
+
+@pytest.fixture()
+def NUM_SAMPLES():
+ """This function returns the number of samples."""
+
+ num_bootstrap_samples = 50
+ num_samples = 10_000
+
+ return num_bootstrap_samples, num_samples
+
+
+@pytest.fixture()
+def bootstrap_CVM_index_variance(CVM_object, NUM_SAMPLES):
+ """This function returns the variance in the computed Cramer-von-Mises index
+ computed using the bootstrap algorithm."""
+
+ #### SETUP ####
+ SA = CVM_object
+
+ np.random.seed(12345) #! set seed for reproducibility
+
+ confidence_level = 0.95
+ delta = -scipy.stats.norm.ppf((1 - confidence_level) / 2)
+
+ num_bootstrap_samples, n_samples = NUM_SAMPLES
+
+ #### Compute indices ####
+ computed_indices = SA.run(
+ n_samples=n_samples,
+ num_bootstrap_samples=num_bootstrap_samples,
+ confidence_level=confidence_level,
+ )
+
+ First_order = computed_indices["CVM_i"].ravel()
+ upper_bound_first_order = computed_indices["CI_CVM_i"][:, 1]
+
+ #### Compute variance ####
+ std_bootstrap_first_order = (upper_bound_first_order - First_order) / delta
+
+ return std_bootstrap_first_order**2
+
+
+@pytest.fixture()
+def model_evals_CVM_index_variance():
+
+ """
+ runmodel_obj = RunModel(
+ model_script='exponential.py',
+ var_names=['X1', 'X2'],
+ vec=True, delete_files=True)
+
+ input_object = JointInd([Normal(0, 1)]*2)
+
+ SA = CramervonMises(runmodel_obj, input_object)
+
+ np.random.seed(12345)
+
+ num_repetitions, n_samples = 1_000, 10_000
+
+ num_vars = 2
+
+ sample_first_order = np.zeros((num_vars, num_repetitions))
+
+ for i in range(num_repetitions):
+ CV_First_order = SA.run(n_samples=n_samples)
+
+ sample_first_order[:, i] = CV_First_order.ravel()
+
+ variance_first_order = np.var(sample_first_order, axis=1).reshape(-1, 1)
+
+ print(variance_first_order)
+
+ """
+
+ variance_first_order = np.array([4.01099066e-05, 2.06802165e-05])
+
+ return variance_first_order
+
+
+@pytest.fixture()
+def ishigami_input_dist_object():
+ """
+ This function returns the input distribution for the Ishigami function.
+
+ X1 ~ Uniform(-pi, pi)
+ X2 ~ Uniform(-pi, pi)
+ X3 ~ Uniform(-pi, pi)
+
+ """
+ return JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3)
+
+
+@pytest.fixture()
+def ishigami_model_object():
+ """This function creates the Ishigami run_model_object"""
+ model = PythonModel(
+ model_script="ishigami.py",
+ model_object_name="evaluate",
+ var_names=[r"$X_1$", "$X_2$", "$X_3$"],
+ delete_files=True,
+ params=[7, 0.1],
+ )
+
+ runmodel_obj = RunModel(model=model)
+
+ return runmodel_obj
+
+
+@pytest.fixture()
+def CVM_object_ishigami(ishigami_model_object, ishigami_input_dist_object):
+ """This function returns the CVM object."""
+
+ return CramervonMises(ishigami_model_object, ishigami_input_dist_object)
+
+
+@pytest.fixture()
+def numerical_Sobol_indices(CVM_object_ishigami):
+ """
+ This function returns the Sobol indices computed
+ using the Pick and Freeze algorithm.
+ """
+
+ SA = CVM_object_ishigami
+
+ np.random.seed(12345)
+
+ computed_indices = SA.run(
+ n_samples=500_000, estimate_sobol_indices=True, disable_CVM_indices=True
+ )
+
+ return computed_indices["sobol_i"], computed_indices["sobol_total_i"]
+
+
+@pytest.fixture()
+def analytical_ishigami_Sobol_indices():
+ """
+ Analytical Sobol indices for the Ishigami function.
+
+ Copy-paste the following to reproduce the given indices:
+
+ a = 7
+ b = 0.1
+
+ V1 = 0.5*(1 + (b*np.pi**4)/5)**2
+ V2 = (a**2)/8
+ V3 = 0
+
+ VT3 = (8*(b**2)*np.pi**8)/225
+ VT1 = V1 + VT3
+ VT2 = V2
+
+ total_variance = V2 + (b*np.pi**4)/5 + ((b**2) * np.pi**8)/18 + 0.5
+
+ S = np.array([V1, V2, V3])/total_variance
+ S_T = np.array([VT1, VT2, VT3])/total_variance
+
+ S = np.around(S, 4)
+ S_T = np.around(S_T, 4)
+
+ """
+
+ S1 = 0.3139
+ S2 = 0.4424
+ S3 = 0
+
+ S_T1 = 0.5576
+ S_T2 = 0.4424
+ S_T3 = 0.2437
+
+ S = np.array([S1, S2, S3])
+ S_T = np.array([S_T1, S_T2, S_T3])
+
+ return S.reshape(-1, 1), S_T.reshape(-1, 1)
+
+
+# Unit tests
+###############################################################################
+
+
+def test_pick_and_freeze_estimator(
+ numerical_exponential_CVM_indices, analytical_exponential_CVM_indices
+):
+ """
+ This function tests the pick_and_freeze_estimator method using 50_000 samples.
+ """
+ S_CVM_analytical = analytical_exponential_CVM_indices
+ S_CVM_numerical = numerical_exponential_CVM_indices
+
+ assert np.isclose(S_CVM_analytical, S_CVM_numerical, rtol=0, atol=1e-2).all()
+
+
+def test_bootstrap_variance_computation(
+ bootstrap_CVM_index_variance, model_evals_CVM_index_variance
+):
+ """
+    This function tests the bootstrap_variance_computation method using
+    10_000 samples and 50 bootstrap samples.
+ """
+ var_first = model_evals_CVM_index_variance
+ boot_var_first = bootstrap_CVM_index_variance
+
+ assert var_first.shape == boot_var_first.shape
+ assert np.isclose(boot_var_first, var_first, rtol=0, atol=1e-4).all()
+
+
+def test_Sobol_estimate_computation(
+ numerical_Sobol_indices, analytical_ishigami_Sobol_indices
+):
+ """
+    This function tests the Sobol index estimates computed by the CramervonMises class using 500_000 samples.
+ """
+ S_numerical, S_T_numerical = numerical_Sobol_indices
+ S_analytical, S_T_analytical = analytical_ishigami_Sobol_indices
+
+ assert S_analytical.shape == S_numerical.shape
+ assert S_T_analytical.shape == S_T_numerical.shape
+ assert np.isclose(S_numerical, S_analytical, rtol=0, atol=1e-2).all()
+ assert np.isclose(S_T_numerical, S_T_analytical, rtol=0, atol=1e-2).all()
diff --git a/tests/unit_tests/sensitivity/test_sobol.py b/tests/unit_tests/sensitivity/test_sobol.py
index 64882a155..784eb6143 100644
--- a/tests/unit_tests/sensitivity/test_sobol.py
+++ b/tests/unit_tests/sensitivity/test_sobol.py
@@ -47,7 +47,6 @@
"""
-import ntpath
import numpy as np
import pytest
import scipy
From 27305fc4ecb3a9342892bc0b88446cf77f970979 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 20:39:59 +0200
Subject: [PATCH 10/59] =?UTF-8?q?Added=20documentation=20Cram=C3=A9r-von?=
=?UTF-8?q?=20Mises=20sensitivity?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../sensitivity/cramer_von_mises/README.rst | 3 +
docs/source/sensitivity/cramer_von_mises.rst | 61 +++++++++++++++++++
docs/source/sensitivity/index.rst | 2 +
3 files changed, 66 insertions(+)
create mode 100644 docs/code/sensitivity/cramer_von_mises/README.rst
create mode 100644 docs/source/sensitivity/cramer_von_mises.rst
diff --git a/docs/code/sensitivity/cramer_von_mises/README.rst b/docs/code/sensitivity/cramer_von_mises/README.rst
new file mode 100644
index 000000000..ea5f804b6
--- /dev/null
+++ b/docs/code/sensitivity/cramer_von_mises/README.rst
@@ -0,0 +1,3 @@
+Cramér-von Mises Sensitivity indices
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
diff --git a/docs/source/sensitivity/cramer_von_mises.rst b/docs/source/sensitivity/cramer_von_mises.rst
new file mode 100644
index 000000000..0c748bbb7
--- /dev/null
+++ b/docs/source/sensitivity/cramer_von_mises.rst
@@ -0,0 +1,61 @@
+Cramér-von Mises indices
+----------------------------------------
+
+A sensitivity index based on the Cramér-von Mises distance. In contrast to variance-based Sobol indices, it takes into account the whole distribution of the model output and is therefore considered a moment-free method [5]_. Furthermore, the index can be naturally extended to multivariate model outputs (not yet implemented in UQpy).
+
+Consider a model :math:`Y=f(X): \mathbb{R}^d \rightarrow \mathbb{R}^k` with :math:`d` inputs :math:`X_{(1)}, X_{(2)}, \ldots, X_{(d)}` and :math:`k` outputs :math:`Y_{(1)}, Y_{(2)}, \ldots, Y_{(k)}`. We define the cumulative distribution function :math:`F(t)` of :math:`Y` as:
+
+.. math::
+
+ F(t)=\mathbb{P}(Z \leqslant t)=\mathbb{E}\left[\mathbb{1}_{\{Z \leqslant t\}}\right] \text { for } t=\left(t_{1}, \ldots, t_{k}\right) \in \mathbb{R}^{k}
+
+and the conditional distribution function :math:`F^{v}(t)` of :math:`Y` given :math:`X_{v}` as:
+
+.. math::
+
+ F^{v}(t)=\mathbb{P}\left(Z \leqslant t \mid X_{v}\right)=\mathbb{E}\left[\mathbb{1}_{\{Z \leqslant t\}} \mid X_{v}\right] \text { for } t=\left(t_{1}, \ldots, t_{k}\right) \in \mathbb{R}^{k}
+
+where, :math:`\{Z \leqslant t\} \text { means that } \left\{Z_{1} \leqslant t_{1}, \ldots, Z_{k} \leqslant t_{k}\right\}`.
+
+The first order Cramér-von Mises index :math:`S_{2, C V M}^{i}` (for input :math:`v = {i}`) is defined as:
+
+.. math::
+
+ S_{2, C V M}^{i}:=\frac{\int_{\mathbb{R}^{k}} \mathbb{E}\left[\left(F(t)-F^{i}(t)\right)^{2}\right] d F(t)}{\int_{\mathbb{R}^{k}} F(t)(1-F(t)) d F(t)}
+
+and the total Cramér-von Mises index :math:`S_{2, C V M}^{T o t, i}` (for input :math:`v = {i}`) is defined as:
+
+.. math::
+
+ S_{2, C V M}^{T o t, i}:=1-S_{2, C V M}^{\sim i}=1-\frac{\int_{\mathbb{R}^{k}} \mathbb{E}\left[\left(F(t)-F^{\sim i}(t)\right)^{2}\right] d F(t)}{\int_{\mathbb{R}^{k}} F(t)(1-F(t)) d F(t)}
+
+The above first and total order indices are estimated using the Pick-and-Freeze approach. This requires :math:`N(d+2)` model evaluations, where :math:`N` is the number of samples. (For implementation details, see also [6]_.)
+
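+To illustrate how these indices are obtained in practice, a minimal usage sketch is given below. It mirrors the examples linked at the end of this page; the model script :code:`local_exponential.py` is a placeholder for any user-supplied model.
+
+.. code-block:: python
+
+ from UQpy.run_model.RunModel import RunModel
+ from UQpy.run_model.model_execution.PythonModel import PythonModel
+ from UQpy.distributions import Normal
+ from UQpy.distributions.collection.JointIndependent import JointIndependent
+ from UQpy.sensitivity.cramer_von_mises import CramervonMises
+
+ # computational model and independent input marginals
+ model = PythonModel(model_script="local_exponential.py", model_object_name="evaluate",
+ var_names=["X_1", "X_2"], delete_files=True)
+ runmodel_obj = RunModel(model=model)
+ dist_object = JointIndependent([Normal(0, 1)] * 2)
+
+ # Cramér-von Mises indices (and, optionally, pick-and-freeze Sobol estimates)
+ SA = CramervonMises(runmodel_obj, dist_object)
+ computed_indices = SA.run(n_samples=20_000, estimate_sobol_indices=True)
+ computed_indices["CVM_i"], computed_indices["sobol_i"], computed_indices["sobol_total_i"]
+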
+Cramér-von Mises Class
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The :class:`CramervonMises` class is imported using the following command:
+
+>>> from UQpy.sensitivity.cramer_von_mises import CramervonMises
+
+Methods
+"""""""
+.. autoclass:: UQpy.sensitivity.CramervonMises
+ :members: run
+
+Attributes
+""""""""""
+.. autoattribute:: UQpy.sensitivity.CramervonMises.CVM_i
+.. autoattribute:: UQpy.sensitivity.CramervonMises.CI_CVM_i
+.. autoattribute:: UQpy.sensitivity.CramervonMises.sobol_i
+.. autoattribute:: UQpy.sensitivity.CramervonMises.sobol_total_i
+.. autoattribute:: UQpy.sensitivity.CramervonMises.n_samples
+.. autoattribute:: UQpy.sensitivity.CramervonMises.num_vars
+
+
+Examples
+""""""""""
+
+.. toctree::
+
+ Cramér-von Mises Examples <../auto_examples/sensitivity/cramer_von_mises/index>
diff --git a/docs/source/sensitivity/index.rst b/docs/source/sensitivity/index.rst
index 0e5fef5c0..324fbf666 100644
--- a/docs/source/sensitivity/index.rst
+++ b/docs/source/sensitivity/index.rst
@@ -6,6 +6,7 @@ This module contains functionality for all the sampling methods supported in :py
The module currently contains the following classes:
- :py:class:`.Sobol`: Class to compute Sobol sensitivity indices.
+- :py:class:`.CramervonMises`: Class to compute Cramér-von Mises sensitivity indices.
- :py:class:`.MorrisSensitivity`: Class to perform Morris sensitivity analysis.
- :py:class:`.PceSensitivity`: Class to compute the sensitivity indices using the :class:`.PolynomialChaosExpansion` method.
@@ -20,3 +21,4 @@ Sensitivity analysis comprises techniques focused on determining how the variati
Morris Sensitivity
Polynomial Chaos Sensitivity
Sobol Sensitivity
+ Cramér-von Mises Sensitivity
From 4d4a012828c69713b17dc78fb565a48d12738695 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 20:40:14 +0200
Subject: [PATCH 11/59] =?UTF-8?q?Added=20examples=20Cram=C3=A9r-von=20Mise?=
=?UTF-8?q?s=20sensitivity?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cramer_von_mises/local_exponential.py | 20 ++++++
.../cramer_von_mises/local_sobol_func.py | 42 +++++++++++
.../cramer_von_mises/plot_cvm_exponential.py | 58 +++++++++++++++
.../cramer_von_mises/plot_cvm_sobol_func.py | 70 +++++++++++++++++++
docs/source/conf.py | 2 +
5 files changed, 192 insertions(+)
create mode 100644 docs/code/sensitivity/cramer_von_mises/local_exponential.py
create mode 100644 docs/code/sensitivity/cramer_von_mises/local_sobol_func.py
create mode 100644 docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py
create mode 100644 docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py
diff --git a/docs/code/sensitivity/cramer_von_mises/local_exponential.py b/docs/code/sensitivity/cramer_von_mises/local_exponential.py
new file mode 100644
index 000000000..1fd0ef0d9
--- /dev/null
+++ b/docs/code/sensitivity/cramer_von_mises/local_exponential.py
@@ -0,0 +1,20 @@
+"""
+
+Auxiliary file
+==============================================
+
+"""
+
+import numpy as np
+
+
+def evaluate(X: np.array) -> np.array:
+ r"""A non-linear function that is used to demonstrate sensitivity index.
+
+ .. math::
+ f(x) = \exp(x_1 + 2*x_2)
+ """
+
+ Y = np.exp(X[:, 0] + 2 * X[:, 1])
+
+ return Y
diff --git a/docs/code/sensitivity/cramer_von_mises/local_sobol_func.py b/docs/code/sensitivity/cramer_von_mises/local_sobol_func.py
new file mode 100644
index 000000000..1ccabc6dd
--- /dev/null
+++ b/docs/code/sensitivity/cramer_von_mises/local_sobol_func.py
@@ -0,0 +1,42 @@
+"""
+
+Auxiliary file
+==============================================
+
+"""
+
+import numpy as np
+import copy
+
+
+def evaluate(X, a_values):
+
+ dims = len(a_values)
+ g = 1
+
+ for i in range(dims):
+ g_i = (np.abs(4 * X[:, i] - 2) + a_values[i]) / (1 + a_values[i])
+ g *= g_i
+
+ return g
+
+
+def sensitivities(a_values):
+
+ dims = len(a_values)
+
+ Total_order = np.zeros((dims, 1))
+
+ V_i = (3 * (1 + a_values) ** 2) ** (-1)
+
+ total_variance = np.prod(1 + V_i) - 1
+
+ First_order = V_i / total_variance
+
+ for i in range(dims):
+
+ rem_First_order = copy.deepcopy(V_i)
+ rem_First_order[i] = 0
+ Total_order[i] = V_i[i] * np.prod(rem_First_order + 1) / total_variance
+
+ return First_order.reshape(-1, 1), Total_order
diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py
new file mode 100644
index 000000000..244fd7805
--- /dev/null
+++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py
@@ -0,0 +1,58 @@
+"""
+
+Exponential function
+==============================================
+
+.. math::
+ f(x) := \exp(x_1 + 2x_2), \quad x_1, x_2 \sim \mathcal{N}(0, 1)
+
+"""
+
+# %%
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Normal
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.cramer_von_mises import CramervonMises as cvm
+
+# %%
+# Create Model object
+model = PythonModel(
+ model_script="local_exponential.py",
+ model_object_name="evaluate",
+ var_names=[r"$X_1$", "$X_2$"],
+ delete_files=True,
+)
+
+runmodel_obj = RunModel(model=model)
+
+# Define distribution object
+dist_object = JointIndependent([Normal(0, 1)] * 2)
+
+# %% [markdown]
+# Compute Cramer-von Mises indices
+
+# %%
+# create cvm object
+SA = cvm(runmodel_obj, dist_object)
+
+# Compute Cramer-von Mises indices and also estimate Sobol indices from the pick-and-freeze samples
+computed_indices = SA.run(n_samples=20_000, estimate_sobol_indices=True)
+
+# %% [markdown]
+# Cramer-von Mises sensitivity analysis
+#
+# Expected value of the sensitivity indices:
+#
+# $S^1_{CVM} = \frac{6}{\pi} \operatorname{arctan}(2) - 2 \approx 0.1145$
+#
+# $S^2_{CVM} = \frac{6}{\pi} \operatorname{arctan}(\sqrt{19}) - 2 \approx 0.5693$
+
+# %%
+computed_indices["CVM_i"]
+
+# %%
+computed_indices["sobol_i"]
+
+# %%
+computed_indices["sobol_total_i"]
diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py
new file mode 100644
index 000000000..da17e3e2f
--- /dev/null
+++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py
@@ -0,0 +1,70 @@
+r"""
+
+Sobol function
+==============================================
+
+.. math::
+
+ g(x_1, x_2, \ldots, x_D) := \prod_{i=1}^{D} \frac{|4x_i - 2| + a_i}{1 + a_i},
+
+where,
+
+.. math::
+ x_i \sim \mathcal{U}(0, 1), \quad a_i \in \mathbb{R}.
+
+"""
+
+# %%
+import numpy as np
+
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Uniform
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.cramer_von_mises import CramervonMises as cvm
+
+# %%
+# Create Model object
+num_vars = 6
+a_vals = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
+
+model = PythonModel(
+ model_script="local_sobol_func.py",
+ model_object_name="evaluate",
+ var_names=[r"$X_1$", "$X_2$"],
+ delete_files=True,
+ a_values=a_vals,
+)
+
+runmodel_obj = RunModel(model=model)
+
+# Define distribution object
+dist_object = JointIndependent([Uniform(0, 1)] * num_vars)
+
+# %%
+SA = cvm(runmodel_obj, dist_object)
+
+# Compute Cramer-von Mises indices and also estimate Sobol indices from the pick-and-freeze samples
+computed_indices = SA.run(n_samples=20_000, estimate_sobol_indices=True)
+
+# %%
+computed_indices["CVM_i"]
+
+# %% [markdown]
+# Sobol indices computed analytically
+#
+# $S_1$ = 0.46067666
+#
+# $S_2$ = 0.20474518
+#
+# $S_3$ = 0.11516917
+#
+# $S_4$ = 0.07370827
+#
+# $S_5$ = 0.0511863
+#
+# $S_6$ = 0.03760626
+#
+
+# %%
+computed_indices["sobol_i"]
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 318e66625..9c31fa120 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -92,6 +92,7 @@
"../code/transformations/nataf",
"../code/sensitivity/morris",
"../code/sensitivity/sobol",
+ "../code/sensitivity/cramer_von_mises",
"../code/stochastic_processes/bispectral",
"../code/stochastic_processes/karhunen_loeve",
"../code/stochastic_processes/spectral",
@@ -127,6 +128,7 @@
"auto_examples/transformations/nataf",
"auto_examples/sensitivity/morris",
"auto_examples/sensitivity/sobol",
+ "auto_examples/sensitivity/cramer_von_mises",
"auto_examples/stochastic_processes/bispectral",
"auto_examples/stochastic_processes/karhunen_loeve",
"auto_examples/stochastic_processes/spectral",
From 8ab489215f852ec46de90164ad92bf4281dd39a5 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 21:16:56 +0200
Subject: [PATCH 12/59] Fixed references in documentation
---
docs/source/sensitivity/cramer_von_mises.rst | 8 ++++++--
docs/source/sensitivity/sobol.rst | 15 ++++++++++-----
2 files changed, 16 insertions(+), 7 deletions(-)
diff --git a/docs/source/sensitivity/cramer_von_mises.rst b/docs/source/sensitivity/cramer_von_mises.rst
index 0c748bbb7..ccd412af9 100644
--- a/docs/source/sensitivity/cramer_von_mises.rst
+++ b/docs/source/sensitivity/cramer_von_mises.rst
@@ -1,7 +1,7 @@
Cramér-von Mises indices
----------------------------------------
-A sensitivity index based on the Cramér-von Mises distance. In contrast to variance-based Sobol indices, it takes into account the whole distribution of the model output and is therefore considered a moment-free method [5]_. Furthermore, the index can be naturally extended to multivariate model outputs (not yet implemented in UQpy).
+A sensitivity index based on the Cramér-von Mises distance. In contrast to variance-based Sobol indices, it takes into account the whole distribution of the model output and is therefore considered a moment-free method [1]_. Furthermore, the index can be naturally extended to multivariate model outputs (not yet implemented in UQpy).
Consider a model :math:`Y=f(X): \mathbb{R}^d \rightarrow \mathbb{R}^k` with :math:`d` inputs :math:`X_{(1)}, X_{(2)}, \ldots, X_{(d)}` and :math:`k` outputs :math:`Y_{(1)}, Y_{(2)}, \ldots, Y_{(k)}`. We define the cumulative distribution function :math:`F(t)` of :math:`Y` as:
@@ -29,7 +29,11 @@ and the total Cramér-von Mises index :math:`S_{2, C V M}^{T o t, i}` (for input
S_{2, C V M}^{T o t, i}:=1-S_{2, C V M}^{\sim i}=1-\frac{\int_{\mathbb{R}^{k}} \mathbb{E}\left[\left(F(t)-F^{\sim i}(t)\right)^{2}\right] d F(t)}{\int_{\mathbb{R}^{k}} F(t)(1-F(t)) d F(t)}
-The above first and total order indices are estimated using the Pick-and-Freeze approach. This requires :math:`N(d+2)` model evaluations, where :math:`N` is the number of samples. (For implementation details, see also [6]_.)
+The above first and total order indices are estimated using the Pick-and-Freeze approach. This requires :math:`N(d+2)` model evaluations, where :math:`N` is the number of samples. (For implementation details, see also [2]_.)
+
+.. [1] Gamboa, F., Klein, T., & Lagnoux, A. (2018). Sensitivity Analysis Based on Cramér-von Mises Distance. SIAM/ASA Journal on Uncertainty Quantification, 6(2), 522-548. doi:10.1137/15M1025621. (`Link <https://doi.org/10.1137/15M1025621>`_)
+
+.. [2] Gamboa, F., Gremaud, P., Klein, T., & Lagnoux, A. (2020). Global Sensitivity Analysis: a new generation of mighty estimators based on rank statistics. arXiv [math.ST].
Cramér-von Mises Class
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/docs/source/sensitivity/sobol.rst b/docs/source/sensitivity/sobol.rst
index 1966330c1..462db15fd 100644
--- a/docs/source/sensitivity/sobol.rst
+++ b/docs/source/sensitivity/sobol.rst
@@ -31,7 +31,7 @@ The Sobol indices are computed using the Pick-and-Freeze approach for single out
Compared to "Sobol1993", the "Janon2014" estimator makes more efficient use of model evaluations and produces better smaller confidence intervals.
-- Sobol1993: Requires :math:`N(m + 1)` model evaluations [2]_.
+- Sobol1993: Requires :math:`N(m + 1)` model evaluations [1]_.
.. math::
S_{i} = \frac{\mathbb{V}\left[E\left(Y \mid X_{i}\right)\right]}{\mathbb{V}(Y)} = \frac{ (1/N) Y_{A} \cdot Y_{C_{i}}-f_{0}^{2}}{(1 / N) Y_{A} \cdot Y_{A}-f_{0}^{2}}
@@ -39,15 +39,15 @@ Compared to "Sobol1993", the "Janon2014" estimator makes more efficient use of m
.. math::
y_{A}=f(A), \quad y_{C_{i}}=f(C_{i}), \quad f_{0}^{2}=\left(\frac{1}{N} \sum_{j=1}^{N} y_{A}^{(j)} \right)^{2}
-- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [4]_.
+- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [2]_.
2. **Second order indices** (:math:`S_{ij}`)
-- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [4]_.
+- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [2]_.
3. **Total order indices** (:math:`S_{T_{i}}`)
-- Homma1996: Requires :math:`N(m + 1)` model evaluations [2]_.
+- Homma1996: Requires :math:`N(m + 1)` model evaluations [1]_.
.. math::
S_{T_{i}} = 1 - \frac{\mathbb{V}\left[E\left(Y \mid \mathbf{X}_{\sim_{i}}\right)\right]}{\mathbb{V}(Y)} = 1 - \frac{ (1 / N) Y_{B} \cdot Y_{C_{i}}-f_{0}^{2}}{(1 / N) Y_{A} \cdot Y_{A}-f_{0}^{2}}
@@ -55,9 +55,14 @@ Compared to "Sobol1993", the "Janon2014" estimator makes more efficient use of m
.. math::
y_{A}=f(A), \quad y_{B}=f(B), \quad y_{C_{i}}=f(C_{i}), \quad f_{0}^{2}=\left(\frac{1}{2N} \sum_{j=1}^{N} \left( y_{A}^{(j)} + y_{B}^{(j)} \right) \right)^{2}
-- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [4]_.
+- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [2]_.
+.. [1] Saltelli, A. (2008). Global sensitivity analysis: the primer.
+ John Wiley. ISBN: 9780470059975
+
+.. [2] Saltelli, A. (2002). Making best use of model evaluations to compute sensitivity indices. Computer Physics Communications, 145(2), 280-297.
+
Sobol Class
^^^^^^^^^^^^^^^^^^
From 852be9d3f41a0815be3d831088c21579ca76fb04 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 21:17:22 +0200
Subject: [PATCH 13/59] Rearranged order in index
---
docs/source/sensitivity/index.rst | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/docs/source/sensitivity/index.rst b/docs/source/sensitivity/index.rst
index 324fbf666..4bb68e1a9 100644
--- a/docs/source/sensitivity/index.rst
+++ b/docs/source/sensitivity/index.rst
@@ -5,10 +5,10 @@ This module contains functionality for all the sampling methods supported in :py
The module currently contains the following classes:
-- :py:class:`.Sobol`: Class to compute Sobol sensitivity indices.
- :py:class:`.CramervonMises`: Class to compute Cramér-von Mises sensitivity indices.
- :py:class:`.MorrisSensitivity`: Class to perform Morris.
- :py:class:`.PceSensitivity`: Class to compute the sensitivity indices using the :class:`.PolynomialChaosExpansion` method.
+- :py:class:`.Sobol`: Class to compute Sobol sensitivity indices.
Sensitivity analysis comprises techniques focused on determining how the variations of input variables :math:`X=\left[ X_{1}, X_{2},…,X_{d} \right]` of a mathematical model influence the response value :math:`Y=h(X)`.
@@ -18,7 +18,7 @@ Sensitivity analysis comprises techniques focused on determining how the variati
:hidden:
:caption: Sensitivity
+ Cramér-von Mises Sensitivity
Morris Sensitivity
Polynomial Chaos Sensitivity
Sobol Sensitivity
- Cramér-von Mises Sensitivity
From 69ac38aec2546bd9b6afb8b634eb9d5ff4fd6115 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 21:18:11 +0200
Subject: [PATCH 14/59] Minor fixes in docstrings
---
src/UQpy/sensitivity/cramer_von_mises.py | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/src/UQpy/sensitivity/cramer_von_mises.py b/src/UQpy/sensitivity/cramer_von_mises.py
index 9cb2fdfe6..229a8b761 100644
--- a/src/UQpy/sensitivity/cramer_von_mises.py
+++ b/src/UQpy/sensitivity/cramer_von_mises.py
@@ -41,6 +41,9 @@ class CramervonMises(Sensitivity):
random variable, or :class:`.JointIndependent` object \
(multivariate RV with independent marginals).
+ :param random_state: Random seed used to initialize the pseudo-random number \
+ generator. Default is :any:`None`.
+
**Methods:**
"""
@@ -95,15 +98,13 @@ def run(
Compute the Cramér-von Mises indices.
:param n_samples: Number of samples used to compute the Cramér-von Mises indices. \
- If :code:`None`, the number of samples is set to the number of samples \
- in the model.
+ Default is 1,000.
:param estimate_sobol_indices: If :code:`True`, the Sobol indices are estimated \
using the pick-and-freeze samples.
:param num_bootstrap_samples: Number of bootstrap samples used to estimate the \
- Sobol indices. If :code:`None`, the number of bootstrap samples is set \
- to the number of samples in the model.
+ Sobol indices. Default is :any:`None`.
:param confidence_level: Confidence level used to compute the confidence \
intervals of the Cramér-von Mises indices.
From 3f59fd89706be8e48dfdae1d1efef719aaa7f2d4 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 21:18:40 +0200
Subject: [PATCH 15/59] Changed docstrings to match rtd_theme
---
src/UQpy/sensitivity/sobol.py | 170 ++++++++++++----------------------
1 file changed, 57 insertions(+), 113 deletions(-)
diff --git a/src/UQpy/sensitivity/sobol.py b/src/UQpy/sensitivity/sobol.py
index d4fb1de56..0eb327aaf 100644
--- a/src/UQpy/sensitivity/sobol.py
+++ b/src/UQpy/sensitivity/sobol.py
@@ -69,65 +69,16 @@ class Sobol(Sensitivity):
For time-series models, the sensitivity indices are computed for each
time instant separately. (Pointwise-in-time Sobol indices)
- **Inputs:**
-
- * **runmodel_object** (``RunModel`` object):
- The computational model. It should be of type
- ``RunModel`` (see ``RunModel`` class).
- The output QoI can be a scalar or vector of
- length `ny`, then the sensitivity indices of
- all `ny` outputs are computed independently.
+ :param runmodel_object: The computational model. It should be of type :class:`.RunModel`. \
+ The output QoI can be a scalar or vector of length :code:`ny`, then the sensitivity \
+ indices of all :code:`ny` outputs are computed independently.
- * **dist_object** ((list of) ``Distribution`` object(s)):
- List of ``Distribution`` objects corresponding
- to each random variable, or ``JointInd`` object
+ :param distributions: List of :class:`.Distribution` objects corresponding to each \
+ random variable, or :class:`.JointIndependent` object \
(multivariate RV with independent marginals).
- * **random_state** (None or `int` or ``numpy.random.RandomState`` object):
- Random seed used to initialize the
- pseudo-random number generator.
- Default is None.
-
- **Attributes:**
-
- * **sobol_i** (`ndarray`):
- First order sensitivity indices.
- Shape: `(num_vars, n_outputs)`
-
- * **sobol_total_i** (`ndarray`):
- Total order sensitivity indices.
- Shape: `(num_vars, n_outputs)`
-
- * **sobol_ij** (`ndarray`):
- Second order sensitivity indices.
- Shape: `(num_second_order_terms, n_outputs)`
-
- * **CI_sobol_i** (`ndarray`):
- Confidence intervals for the first order sensitivity indices.
- Shape: `(num_vars, 2)`
-
- if multioutput: Shape: `(n_outputs, num_vars, 2)`
-
- * **CI_sobol_total_i** (`ndarray`):
- Confidence intervals for the total order sensitivity indices.
- Shape: `(num_vars, 2)`
-
- if multioutput: Shape: `(n_outputs, num_vars, 2)`
-
- * **CI_sobol_ij** (`ndarray`):
- Confidence intervals for the second order Sobol indices.
- Shape: `(num_second_order_terms, 2)`
-
- if multioutput: Shape: `(n_outputs, num_second_order_terms, 2)`
-
- * **n_samples** (`int`):
- Number of samples used to compute the sensitivity indices.
-
- * **num_vars** (`int`):
- Number of model input variables.
-
- * **multioutput** (`bool`):
- True if the model has multiple outputs.
+ :param random_state: Random seed used to initialize the pseudo-random number \
+ generator. Default is :any:`None`.
**Methods:**
"""
@@ -150,6 +101,33 @@ def __init__(
# add the handler to the logger
self.logger.addHandler(ch)
+ self.sobol_i = None
+ "First order Sobol indices, :class:`numpy.ndarray` of shape `(num_vars, n_outputs)`"
+
+ self.sobol_total_i = None
+ "Total order Sobol indices, :class:`numpy.ndarray` of shape `(num_vars, n_outputs)`"
+
+ self.sobol_ij = None
+ "Second order Sobol indices, :class:`numpy.ndarray` of shape `(num_second_order_terms, n_outputs)`"
+
+ self.CI_sobol_i = None
+ "Confidence intervals for the first order Sobol indices, :class:`numpy.ndarray` of shape `(num_vars, 2)`"
+
+ self.CI_sobol_total_i = None
+ "Confidence intervals for the total order Sobol indices, :class:`numpy.ndarray` of shape `(num_vars, 2)`"
+
+ self.CI_sobol_ij = None
+ "Confidence intervals for the second order Sobol indices, :class:`numpy.ndarray` of shape `(num_second_order_terms, 2)`"
+
+ self.n_samples = None
+ "Number of samples used to compute the sensitivity indices, :class:`int`"
+
+ self.num_vars = None
+ "Number of model input variables, :class:`int`"
+
+ self.multioutput = None
+ "True if the model has multiple outputs, :class:`bool`"
+
def run(
self,
n_samples=1_000,
@@ -164,71 +142,37 @@ def run(
"""
Compute the sensitivity indices and confidence intervals.
- **Inputs:**
-
- * **n_samples** (`int`):
- Number of samples used to compute the sensitivity indices.
+ :param n_samples: Number of samples used to compute the sensitivity indices. \
Default is 1,000.
- * **num_boostrap_samples** (`int`):
- Number of bootstrap samples used to compute
- the confidence intervals.
- Default is None.
-
- * **confidence_interval** (`float`):
- Confidence interval used to compute the confidence intervals.
- Default is 0.95.
-
- * **estimate_second_order** (`bool`):
- If True, compute the second order sensitivity indices.
- Default is False.
-
- * **first_order_scheme** (`str`):
- Scheme used to compute the first order Sobol indices.
- Default is "Sobol1993".
-
- * **total_order_scheme** (`str`):
- Scheme used to compute the total order Sobol indices.
- Default is "Homma1996".
-
- * **second_order_scheme** (`str`):
- Scheme used to compute the second order Sobol indices.
- Default is "Saltelli2002".
-
- **Outputs:**
-
- * **computed_indices** (`dict`):
- Dictionary containing the computed sensitivity indices.
-
- * **sobol_i** (`ndarray`):
- First order Sobol indices.
- Shape: `(num_vars, n_outputs)`
-
- * **sobol_total_i** (`ndarray`):
- Total order Sobol indices.
- Shape: `(num_vars, n_outputs)`
-
- * **sobol_ij** (`ndarray`):
- Second order Sobol indices.
- Shape: `(num_second_order_terms, n_outputs)`
+ :param num_bootstrap_samples: Number of bootstrap samples used to compute the \
+ confidence intervals. Default is :any:`None`.
- * **CI_sobol_i** (`ndarray`):
- Confidence intervals for the first order Sobol indices.
- Shape: `(num_vars, 2)`
+ :param confidence_interval: Confidence level used to compute the confidence \
+ intervals. Default is 0.95.
- if multioutput: Shape: `(n_outputs, num_vars, 2)`
+ :param estimate_second_order: If True, the second order Sobol indices are \
+ estimated. Default is False.
- * **CI_sobol_total_i** (`ndarray`):
- Confidence intervals for the total order Sobol indices.
- Shape: `(num_vars, 2)`
+ :param first_order_scheme: Scheme used to compute the first order Sobol \
+ indices. Default is "Janon2014".
- if multioutput: Shape: `(n_outputs, num_vars, 2)`
+ :param total_order_scheme: Scheme used to compute the total order Sobol \
+ indices. Default is "Homma1996".
- * **CI_sobol_ij** (`ndarray`):
- Confidence intervals for the second order Sobol indices.
- Shape: `(num_second_order_terms, 2)`
+ :param second_order_scheme: Scheme used to compute the second order \
+ Sobol indices. Default is "Saltelli2002".
- if multioutput: Shape: `(n_outputs, num_second_order_terms, 2)`
+ :return: A :class:`dict` with the following keys: \
+ :code:`sobol_i` of shape :code:`(num_vars, 1)`, \
+ :code:`sobol_total_i` of shape :code:`(num_vars, 1)`, \
+ :code:`sobol_ij` of shape :code:`(num_second_order_terms, 1)`, \
+ :code:`CI_sobol_i` of shape :code:`(num_vars, 2)` \
+ (if multioutput: :code:`(n_outputs, num_vars, 2)`), \
+ :code:`CI_sobol_total_i` of shape :code:`(num_vars, 2)` \
+ (if multioutput: :code:`(n_outputs, num_vars, 2)`), \
+ :code:`CI_sobol_ij` of shape :code:`(num_second_order_terms, 2)` \
+ (if multioutput: :code:`(n_outputs, num_second_order_terms, 2)`).
"""
# Check n_samples data type
From 92a724c1efd93d2af33233fd5491afc3b773ab21 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 23:48:32 +0200
Subject: [PATCH 16/59] Added Chatterjee sensitivity index
---
src/UQpy/sensitivity/__init__.py | 2 +
src/UQpy/sensitivity/chatterjee.py | 448 +++++++++++++++++++++++++++++
2 files changed, 450 insertions(+)
create mode 100644 src/UQpy/sensitivity/chatterjee.py
diff --git a/src/UQpy/sensitivity/__init__.py b/src/UQpy/sensitivity/__init__.py
index 10ca1565d..73c4166da 100644
--- a/src/UQpy/sensitivity/__init__.py
+++ b/src/UQpy/sensitivity/__init__.py
@@ -2,8 +2,10 @@
from UQpy.sensitivity.PceSensitivity import PceSensitivity
from UQpy.sensitivity.sobol import Sobol
from UQpy.sensitivity.cramer_von_mises import CramervonMises
+from UQpy.sensitivity.chatterjee import Chatterjee
from . import MorrisSensitivity
from . import PceSensitivity
from . import Sobol
from . import CramervonMises
+from . import Chatterjee
diff --git a/src/UQpy/sensitivity/chatterjee.py b/src/UQpy/sensitivity/chatterjee.py
new file mode 100644
index 000000000..9bea38cdb
--- /dev/null
+++ b/src/UQpy/sensitivity/chatterjee.py
@@ -0,0 +1,448 @@
+"""
+This module contains the Chatterjee coefficient of correlation proposed
+in [1]_.
+
+Using the rank statistics, we can also estimate the Sobol indices proposed by
+Gamboa et al. [2]_.
+
+References
+----------
+
+.. [1] Sourav Chatterjee (2021) A New Coefficient of Correlation, Journal of the
+ American Statistical Association, 116:536, 2009-2022,
+ DOI: 10.1080/01621459.2020.1758115
+
+.. [2] Fabrice Gamboa, Pierre Gremaud, Thierry Klein, and Agnès Lagnoux. (2020).
+ Global Sensitivity Analysis: a new generation of mighty estimators
+ based on rank statistics.
+
+"""
+
+import logging
+
+import numpy as np
+import scipy.stats
+
+from UQpy.sensitivity.baseclass.sensitivity import Sensitivity
+from UQpy.sensitivity.sobol import compute_first_order as compute_first_order_sobol
+from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter
+
+
+class Chatterjee(Sensitivity):
+ """
+ Compute sensitivity indices using the Chatterjee correlation coefficient.
+
+ Using the same model evaluations, we can also estimate the Sobol indices.
+
+ :param runmodel_object: The computational model. It should be of type :class:`.RunModel`. \
+ The output QoI can be a scalar or vector of length :code:`ny`, then the sensitivity \
+ indices of all :code:`ny` outputs are computed independently.
+
+ :param distributions: List of :class:`.Distribution` objects corresponding to each \
+ random variable, or :class:`.JointIndependent` object \
+ (multivariate RV with independent marginals).
+
+ :param random_state: Random seed used to initialize the pseudo-random number \
+ generator. Default is :any:`None`.
+
+ **Methods:**
+ """
+
+ def __init__(self, runmodel_object, dist_object, random_state=None, **kwargs):
+ super().__init__(
+ runmodel_object, dist_object, random_state=random_state, **kwargs
+ )
+
+ # Create logger with the same name as the class
+ self.logger = logging.getLogger(__name__)
+ self.logger.setLevel(logging.ERROR)
+ frmt = UQpyLoggingFormatter()
+
+ # create console handler with a higher log level
+ ch = logging.StreamHandler()
+ ch.setFormatter(frmt)
+
+ # add the handler to the logger
+ self.logger.addHandler(ch)
+
+ self.chatterjee_i = None
+ "Chatterjee sensitivity indices (First order), :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`"
+
+ self.sobol_i = None
+ "Sobol indices computed using the rank statistics, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`"
+
+ self.CI_chatterjee_i = None
+ "Confidence intervals for the Chatterjee sensitivity indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 2)`"
+
+ self.num_vars = None
+ "Number of input random variables, :class:`int`"
+
+ self.n_samples = None
+ "Number of samples used to estimate the sensitivity indices, :class:`int`"
+
+ def run(
+ self,
+ n_samples=1_000,
+ estimate_sobol_indices=False,
+ num_bootstrap_samples=None,
+ confidence_level=0.95,
+ ):
+ """
+ Compute the sensitivity indices using the Chatterjee method.
+
+ :param n_samples: Number of samples used to compute the Chatterjee indices. \
+ Default is 1,000.
+
+ :param estimate_sobol_indices: If :code:`True`, the first order Sobol indices are \
+ estimated using the rank statistics.
+
+ :param num_bootstrap_samples: Number of bootstrap samples used to estimate the \
+ confidence intervals of the Chatterjee indices. Default is :any:`None`.
+
+ :param confidence_level: Confidence level used to compute the confidence \
+ intervals of the Chatterjee indices.
+
+ :return: A :class:`dict` with the following keys: \
+ :code:`'chatterjee_i'` of shape :code:`(num_vars, 1)`, \
+ :code:`'CI_chatterjee_i'` of shape :code:`(num_vars, 2)`, \
+ :code:`'sobol_i'` of shape :code:`(num_vars, 1)`.
+
+ """
+
+ # Check nsamples
+ self.n_samples = n_samples
+ if not isinstance(self.n_samples, int):
+ raise TypeError("UQpy: n_samples should be an integer")
+
+ # Check num_bootstrap_samples data type
+ if num_bootstrap_samples is not None:
+ if not isinstance(num_bootstrap_samples, int):
+ raise TypeError("UQpy: num_bootstrap_samples should be an integer.\n")
+ elif num_bootstrap_samples is None:
+ self.logger.info(
+ "UQpy: num_bootstrap_samples is set to None, confidence intervals will not be computed.\n"
+ )
+
+ ################## GENERATE SAMPLES ##################
+
+ A_samples = self.dist_object.rvs(self.n_samples, random_state=self.random_state)
+
+ self.logger.info("UQpy: Generated samples successfully.\n")
+
+ self.num_vars = A_samples.shape[1] # number of variables
+
+ ################# MODEL EVALUATIONS ####################
+
+ A_model_evals = self._run_model(A_samples).reshape(-1, 1)
+
+ self.logger.info("UQpy: Model evaluations completed.\n")
+
+ ######################### STORAGE ########################
+ # Create dictionary to store the sensitivity indices
+ computed_indices = {}
+
+ ################## COMPUTE CHATTERJEE INDICES ##################
+
+ self.chatterjee_i = self.compute_chatterjee_indices(A_samples, A_model_evals)
+
+ self.logger.info("UQpy: Chatterjee indices computed successfully.\n")
+
+ # Store the indices in the dictionary
+ computed_indices["chatterjee_i"] = self.chatterjee_i
+
+ ################## COMPUTE SOBOL INDICES ##################
+
+ self.logger.info("UQpy: Computing First order Sobol indices ...\n")
+
+ if estimate_sobol_indices:
+ f_C_i_model_evals = self.compute_rank_analog_of_f_C_i(
+ A_samples, A_model_evals
+ )
+
+ self.sobol_i = self.compute_Sobol_indices(A_model_evals, f_C_i_model_evals)
+
+ self.logger.info("UQpy: First order Sobol indices computed successfully.\n")
+
+ # Store the indices in the dictionary
+ computed_indices["sobol_i"] = self.sobol_i
+
+ ################## CONFIDENCE INTERVALS ####################
+
+ if num_bootstrap_samples is not None:
+
+ self.logger.info("UQpy: Computing confidence intervals ...\n")
+
+ estimator_inputs = [A_samples, A_model_evals]
+
+ self.CI_chatterjee_i = self.bootstrapping(
+ self.compute_chatterjee_indices,
+ estimator_inputs,
+ computed_indices["chatterjee_i"],
+ num_bootstrap_samples,
+ confidence_level,
+ )
+
+ self.logger.info(
+ "UQpy: Confidence intervals for Chatterjee indices computed successfully.\n"
+ )
+
+ computed_indices["CI_chatterjee_i"] = self.CI_chatterjee_i
+
+ return computed_indices
+
+ @staticmethod
+ def compute_chatterjee_indices(X, Y, seed=None):
+ r"""
+
+ Compute the Chatterjee sensitivity indices
+ between the input random variables :math:`X=\left[ X_{1}, X_{2}, \ldots, X_{d} \right]`
+ and the output random variable :math:`Y`.
+
+ :param X: Input random vectors, :class:`numpy.ndarray` of shape :code:`(n_samples, num_vars)`
+
+ :param Y: Output random vector, :class:`numpy.ndarray` of shape :code:`(n_samples, 1)`
+
+ :param seed: Seed for the random number generator.
+
+ :return: Chatterjee sensitivity indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`
+
+ """
+
+ if seed is not None:
+ # set seed for reproducibility
+ np.random.seed(seed)
+
+ N = X.shape[0] # number of samples
+ m = X.shape[1] # number of variables
+
+ chatterjee_indices = np.zeros((m, 1))
+
+ for i in range(m):
+
+ # Samples of random variable X_i
+ X_i = X[:, i].reshape(-1, 1)
+
+ #! For ties in X_i
+ # we break ties uniformly at random
+ # Shuffle X_i and Y
+ _ix = np.arange(N) # indices of X_i
+ np.random.shuffle(_ix) # shuffle indices
+ X_i_shuffled = X_i[_ix] # shuffle X_i
+ Y_shuffled = Y[_ix] # shuffle Y
+
+ Z = np.hstack((X_i_shuffled, Y_shuffled))
+
+ # Sort the columns of Z by X_i
+ # such that the tuple (X_i, Y_i) is unchanged
+ Z_sorted = Z[Z[:, 0].argsort()]
+
+ # Find rank of y_i in the sorted columns of Y
+ # r[i] is number of j s.t. y[j] <= y[i],
+ # This is accomplished using rankdata with method='max'
+ # Example: Y = [1, 2, 3, 3, 4, 5], rank = [1, 2, 4, 4, 5, 6]
+ rank = scipy.stats.rankdata(Z_sorted[:, 1], method="max")
+
+ #! For ties in Y
+ # l[i] is number of j s.t. y[i] <= y[j],
+ # This is accomplished using rankdata with method='max'
+ # Example: Y = [1, 2, 3, 3, 4, 5], l = [6, 5, 4, 4, 2, 1]
+ # One could also use the Y_shuffled array, since sum2 only
+ # multiplies terms of same index, i.e l_i*(n - l_i)
+ L = scipy.stats.rankdata(-Z_sorted[:, 1], method="max")
+
+ sum1 = np.abs(rank[1:] - rank[:-1]).sum()
+
+ sum2 = np.sum(L * (N - L))
+
+ chatterjee_indices[i] = 1 - N * sum1 / (2 * sum2)
+
+ return chatterjee_indices
+
+ @staticmethod
+ def rank_analog_to_pickfreeze(X, j):
+ r"""
+ Computing the :math:`N(j)` for each :math:`j \in \{1, \ldots, n\}`
+ as in eq. (8) of [2]_, where :math:`n` is the size of :math:`X`.
+
+ .. math::
+ :nowrap:
+
+ \begin{equation}
+ N(j):=
+ \begin{cases}
+ \pi^{-1}(\pi(j)+1) &\text { if } \pi(j)+1 \leqslant n \\
+ \pi^{-1}(1) &\text { if } \pi(j)=n
+ \end{cases}
+ \end{equation}
+
+ where, :math:`\pi(j) := \mathrm{rank}(x_j)`
+
+ :param X: Input random vector, :class:`numpy.ndarray` of shape :code:`(n_samples, 1)`
+
+ :param j: Index of the sample :math:`j \in \{1, \ldots, n\}`
+
+ :return: :math:`N(j)` :class:`int`
+
+ """
+
+ N = X.shape[0] # number of samples
+
+ # Ranks of elements of X_i
+ # -1 so that the ranks are 0-based
+ # for convenience in indexing
+ rank_X = scipy.stats.rankdata(X) - 1
+ rank_X = rank_X.astype(int)
+
+ # Find rank of element j
+ rank_j = rank_X[j]
+
+ if rank_j + 1 <= N - 1:
+ # Get index of element: rank_j + 1
+ return np.where(rank_X == rank_j + 1)[0][0]
+
+ if rank_j == N - 1:
+ return np.where(rank_X == 0)[0][0]
+
+ @staticmethod
+ def rank_analog_to_pickfreeze_vec(X):
+ r"""
+ Computing the :math:`N(j)` for each :math:`j \in \{1, \ldots, n\}`
+ in a vectorized manner, where :math:`n` is the size of :math:`X`.
+
+ This method is significantly faster than the looping version
+ ``rank_analog_to_pickfreeze`` but is also more complicated.
+
+ .. math::
+ :nowrap:
+
+ \begin{equation}
+ N(j):=
+ \begin{cases}
+ \pi^{-1}(\pi(j)+1) &\text { if } \pi(j)+1 \leqslant n \\
+ \pi^{-1}(1) &\text { if } \pi(j)=n
+ \end{cases}
+ \end{equation}
+
+ where, :math:`\pi(j) := \mathrm{rank}(x_j)`
+
+ Key idea: :math:`\pi^{-1}` is ``rank_X.argsort()``.
+
+ Example:
+ X = [22, 74, 44, 11, 1]
+
+ N_J = [3, 5, 2, 1, 4] (1-based indexing)
+
+ N_J = [2, 4, 1, 0, 3] (0-based indexing)
+
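+ Illustrative sketch of the key idea (assumes ordinal, tie-free ranks; the
+ modulo handles the wrap-around case that the implementation treats
+ separately)::
+
+ import numpy as np
+ import scipy.stats
+
+ X = np.array([22, 74, 44, 11, 1])
+ rank_X = scipy.stats.rankdata(X, method="ordinal") - 1 # [2, 4, 3, 1, 0]
+ pi_inverse = rank_X.argsort() # [4, 3, 0, 2, 1]
+ N_J = pi_inverse[(rank_X + 1) % X.shape[0]] # [2, 4, 1, 0, 3]
+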
+ :param X: Input random vector, :class:`numpy.ndarray` of shape :code:`(n_samples, 1)`
+
+ :return: :math:`N(j)`, :class:`numpy.ndarray` of shape :code:`(n_samples, 1)`
+
+ """
+
+ N = X.shape[0] # number of samples
+ N_func = np.zeros((N, 1))
+
+ # Ranks of elements of X_i
+ # -1 since ranks are 0-based
+ rank_X = scipy.stats.rankdata(X, method="ordinal") - 1
+ rank_X = rank_X.astype(int)
+
+ # Inverse of pi(j): j = pi^-1(rank_X(j))
+ #! This is non-trivial
+ pi_inverse = rank_X.argsort() # complexity: N*log(N)
+
+ # CONDITION 2
+ # Find j with rank_j == N-1
+ j_meets_condition_2 = pi_inverse[N - 1]
+ N_func[j_meets_condition_2] = pi_inverse[0]
+
+ # CONDITION 1
+ # Find j's with rank_j + 1 <= N-1
+ # term_1 = pi(j) + 1
+ j_remaining = np.delete(np.arange(N), j_meets_condition_2)
+ term_1 = rank_X[j_remaining] + 1
+
+ j_remaining_meet_condition_1 = pi_inverse[term_1]
+
+ # j_remaining_meet_condition_1 = np.where(rank_X_i == condition)
+ N_func[j_remaining, 0] = j_remaining_meet_condition_1
+
+ return N_func.astype(int)
+
+ @staticmethod
+ def compute_Sobol_indices(A_model_evals, C_i_model_evals):
+ r"""
+ A method to estimate the first order Sobol indices using
+ the Chatterjee method.
+
+ .. math::
+ :nowrap:
+
+ \begin{equation}
+ \xi_{n}^{\mathrm{Sobol}}\left(X_{1}, Y\right):=
+ \frac{\frac{1}{n} \sum_{j=1}^{n} Y_{j} Y_{N(j)}-\left(\frac{1}{n} \sum_{j=1}^{n} Y_{j}\right)^{2}}
+ {\frac{1}{n} \sum_{j=1}^{n}\left(Y_{j}\right)^{2}-\left(\frac{1}{n} \sum_{j=1}^{n} Y_{j}\right)^{2}}
+ \end{equation}
+
+ where the term :math:`Y_{N(j)}` is computed using the method ``rank_analog_to_pickfreeze_vec``.
+
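+ Illustrative sketch (here ``y`` denotes the 1D model evaluations of sample
+ set A and ``N_j`` the indices returned by ``rank_analog_to_pickfreeze_vec``)::
+
+ num = (y * y[N_j]).mean() - y.mean() ** 2
+ den = (y ** 2).mean() - y.mean() ** 2
+ sobol_1 = num / den
+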
+ :param A_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_samples, 1)`
+
+ :param C_i_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_samples, num_vars)`
+
+ :return: First order Sobol indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`
+
+ """
+
+ # extract shape
+ _shape = C_i_model_evals.shape
+
+ # convert C_i_model_evals to 3D array
+ # with n_outputs=1 in first dimension
+ n_outputs = 1
+ C_i_model_evals = C_i_model_evals.reshape((n_outputs, *_shape))
+
+ first_order_sobol = compute_first_order_sobol(
+ A_model_evals, None, C_i_model_evals, scheme="Sobol1993"
+ )
+
+ return first_order_sobol
+
+ def compute_rank_analog_of_f_C_i(self, A_samples, A_model_evals):
+ r"""
+ In the Pick and Freeze method, we use model evaluations
+ :math:`f_A`, :math:`f_B`, :math:`f_{C_{i}}`
+ to compute the Sobol indices.
+
+ Gamboa et al. provide a rank analog to :math:`f_{C_{i}}` in eq. (6) of [2]_.
+
+ :param A_samples: Input samples, :class:`numpy.ndarray` of shape :code:`(n_samples, num_vars)`
+
+ :param A_model_evals: Model evaluations of sample set A, :class:`numpy.ndarray` of shape :code:`(n_samples, 1)`
+
+ :return: Rank analog of the :math:`f_{C_{i}}` model evaluations, :class:`numpy.ndarray` of shape :code:`(n_samples, num_vars)`
+
+ """
+
+ f_A = A_model_evals
+ N = f_A.shape[0]
+ m = self.num_vars
+
+ A_i_model_evals = np.zeros((N, m))
+
+ for i in range(m):
+
+ K = self.rank_analog_to_pickfreeze_vec(A_samples[:, i])
+
+ A_i_model_evals[:, i] = f_A[K].ravel()
+
+ return A_i_model_evals
From c86aea248903b03f72c0b918556d911b13ab3f41 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 23:48:44 +0200
Subject: [PATCH 17/59] Minor docstring fix
---
src/UQpy/sensitivity/cramer_von_mises.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/UQpy/sensitivity/cramer_von_mises.py b/src/UQpy/sensitivity/cramer_von_mises.py
index 229a8b761..66421b440 100644
--- a/src/UQpy/sensitivity/cramer_von_mises.py
+++ b/src/UQpy/sensitivity/cramer_von_mises.py
@@ -83,7 +83,7 @@ def __init__(
"Number of samples used to compute the Cramér-von Mises indices, :class:`int`"
self.num_vars = None
- "Number of random variables, :class:`int`"
+ "Number of input random variables, :class:`int`"
def run(
self,
From 0b5a6662be6e673ff2ffd34807f1cb8afa3f5f53 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 23:49:46 +0200
Subject: [PATCH 18/59] Added documentation Chatterjee sensitivity
---
docs/code/sensitivity/chatterjee/README.rst | 15 ++++++++
docs/source/conf.py | 2 +
docs/source/sensitivity/chatterjee.rst | 41 +++++++++++++++++++++
docs/source/sensitivity/index.rst | 2 +
4 files changed, 60 insertions(+)
create mode 100644 docs/code/sensitivity/chatterjee/README.rst
create mode 100644 docs/source/sensitivity/chatterjee.rst
diff --git a/docs/code/sensitivity/chatterjee/README.rst b/docs/code/sensitivity/chatterjee/README.rst
new file mode 100644
index 000000000..590eee2a7
--- /dev/null
+++ b/docs/code/sensitivity/chatterjee/README.rst
@@ -0,0 +1,15 @@
+Chatterjee indices
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+These examples serve as a guide for using the Chatterjee sensitivity module. They have been taken from various papers to enable validation of the implementation and have been referenced accordingly.
+
+1. Ishigami function
+
+2. Exponential function
+
+For the exponential model, analytical Cramér-von Mises indices are available; since the Chatterjee indices converge to them, they are shown here for comparison.
+
+3. Sobol function
+
+This example was considered in [1] page 18.
+
+.. [1] Fabrice Gamboa, Pierre Gremaud, Thierry Klein, and Agnès Lagnoux. (2020). Global Sensitivity Analysis: a new generation of mighty estimators based on rank statistics.
\ No newline at end of file
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 9c31fa120..7d113e439 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -93,6 +93,7 @@
"../code/sensitivity/morris",
"../code/sensitivity/sobol",
"../code/sensitivity/cramer_von_mises",
+ "../code/sensitivity/chatterjee",
"../code/stochastic_processes/bispectral",
"../code/stochastic_processes/karhunen_loeve",
"../code/stochastic_processes/spectral",
@@ -129,6 +130,7 @@
"auto_examples/sensitivity/morris",
"auto_examples/sensitivity/sobol",
"auto_examples/sensitivity/cramer_von_mises",
+ "auto_examples/sensitivity/chatterjee",
"auto_examples/stochastic_processes/bispectral",
"auto_examples/stochastic_processes/karhunen_loeve",
"auto_examples/stochastic_processes/spectral",
diff --git a/docs/source/sensitivity/chatterjee.rst b/docs/source/sensitivity/chatterjee.rst
new file mode 100644
index 000000000..88f77889b
--- /dev/null
+++ b/docs/source/sensitivity/chatterjee.rst
@@ -0,0 +1,41 @@
+Chatterjee indices
+----------------------------------------
+
+The Chatterjee index measures the strength of the relationship between :math:`X` and :math:`Y` using rank statistics.
+
+Consider :math:`n` samples of random variables :math:`X` and :math:`Y`, with :math:`(X_{(1)}, Y_{(1)}), \ldots,(X_{(n)}, Y_{(n)})` such that :math:`X_{(1)} \leq \cdots \leq X_{(n)}`. Here, the random variable :math:`X` can be one of the inputs of a model and :math:`Y` the model response. If the :math:`X_{i}`'s have no ties, there is a unique way of doing this (the case of ties is also taken into account in the implementation, see [1]_). Let :math:`r_{i}` be the rank of :math:`Y_{(i)}`, that is, the number of :math:`j` such that :math:`Y_{(j)} \leq Y_{(i)}`. The Chatterjee index :math:`\xi_{n}(X, Y)` is defined as:
+
+.. math::
+
+ \xi_{n}(X, Y):=1-\frac{3 \sum_{i=1}^{n-1}\left|r_{i+1}-r_{i}\right|}{n^{2}-1}
+
+The Chatterjee index converges to the Cramér-von Mises index as :math:`n \rightarrow \infty` and is faster to estimate than the Pick-and-Freeze estimator of the Cramér-von Mises index.
+
+.. [1] Sourav Chatterjee (2021) A New Coefficient of Correlation, Journal of the American Statistical Association, 116:536, 2009-2022, DOI: 10.1080/01621459.2020.1758115 (`Link <https://doi.org/10.1080/01621459.2020.1758115>`_)
+
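+The rank-based estimator above can be sketched in a few lines; this is an illustrative sketch of the tie-free formula only (the UQpy implementation additionally breaks ties in :math:`X` uniformly at random):
+
+.. code-block:: python
+
+ import numpy as np
+ import scipy.stats
+
+ def chatterjee_xi(x, y):
+ # sort y by x, rank the sorted y, and evaluate the tie-free formula above
+ n = x.shape[0]
+ y_sorted = y[np.argsort(x)]
+ r = scipy.stats.rankdata(y_sorted, method="max")
+ return 1 - 3 * np.abs(np.diff(r)).sum() / (n ** 2 - 1)
+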
+Chatterjee Class
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The :class:`Chatterjee` class is imported using the following command:
+
+>>> from UQpy.sensitivity.chatterjee import Chatterjee
+
+Methods
+"""""""
+.. autoclass:: UQpy.sensitivity.Chatterjee
+ :members: run, compute_chatterjee_indices, rank_analog_to_pickfreeze, compute_Sobol_indices
+
+Attributes
+""""""""""
+.. autoattribute:: UQpy.sensitivity.Chatterjee.chatterjee_i
+.. autoattribute:: UQpy.sensitivity.Chatterjee.sobol_i
+.. autoattribute:: UQpy.sensitivity.Chatterjee.CI_chatterjee_i
+.. autoattribute:: UQpy.sensitivity.Chatterjee.num_vars
+.. autoattribute:: UQpy.sensitivity.Chatterjee.n_samples
+
+Examples
+""""""""""
+
+.. toctree::
+
+ Chatterjee Examples <../auto_examples/sensitivity/chatterjee/index>
diff --git a/docs/source/sensitivity/index.rst b/docs/source/sensitivity/index.rst
index 4bb68e1a9..e663eed66 100644
--- a/docs/source/sensitivity/index.rst
+++ b/docs/source/sensitivity/index.rst
@@ -5,6 +5,7 @@ This module contains functionality for all the sampling methods supported in :py
The module currently contains the following classes:
+- :py:class:`.Chatterjee`: Class to compute Chatterjee sensitivity indices.
- :py:class:`.CramervonMises`: Class to compute Cramér-von Mises sensitivity indices.
- :py:class:`.MorrisSensitivity`: Class to perform Morris sensitivity analysis.
- :py:class:`.PceSensitivity`: Class to compute the sensitivity indices using the :class:`.PolynomialChaosExpansion` method.
@@ -18,6 +19,7 @@ Sensitivity analysis comprises techniques focused on determining how the variati
:hidden:
:caption: Sensitivity
+ Chatterjee
Cramér-von Mises Sensitivity
Morris Sensitivity
Polynomial Chaos Sensitivity
From e4407c667e188acbaa0fe1af97727f3df95f1f18 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 23:50:15 +0200
Subject: [PATCH 19/59] Added examples Chatterjee sensitivity
---
.../chatterjee/local_exponential.py | 20 ++++++
.../sensitivity/chatterjee/local_ishigami.py | 23 ++++++
.../chatterjee/local_sobol_func.py | 42 +++++++++++
.../chatterjee/plot_chatterjee_exponential.py | 54 ++++++++++++++
.../chatterjee/plot_chatterjee_ishigami.py | 58 +++++++++++++++
.../chatterjee/plot_chatterjee_sobol_func.py | 70 +++++++++++++++++++
6 files changed, 267 insertions(+)
create mode 100644 docs/code/sensitivity/chatterjee/local_exponential.py
create mode 100644 docs/code/sensitivity/chatterjee/local_ishigami.py
create mode 100644 docs/code/sensitivity/chatterjee/local_sobol_func.py
create mode 100644 docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py
create mode 100644 docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py
create mode 100644 docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
diff --git a/docs/code/sensitivity/chatterjee/local_exponential.py b/docs/code/sensitivity/chatterjee/local_exponential.py
new file mode 100644
index 000000000..1fd0ef0d9
--- /dev/null
+++ b/docs/code/sensitivity/chatterjee/local_exponential.py
@@ -0,0 +1,20 @@
+"""
+
+Auxiliary file
+==============================================
+
+"""
+
+import numpy as np
+
+
+def evaluate(X: np.array) -> np.array:
+ r"""A non-linear function that is used to demonstrate sensitivity index.
+
+ .. math::
+ f(x) = \exp(x_1 + 2*x_2)
+ """
+
+ Y = np.exp(X[:, 0] + 2 * X[:, 1])
+
+ return Y
diff --git a/docs/code/sensitivity/chatterjee/local_ishigami.py b/docs/code/sensitivity/chatterjee/local_ishigami.py
new file mode 100644
index 000000000..e5af649fe
--- /dev/null
+++ b/docs/code/sensitivity/chatterjee/local_ishigami.py
@@ -0,0 +1,23 @@
+"""
+
+Auxiliary file
+==============================================
+
+"""
+
+import numpy as np
+
+
+def evaluate(X, params=[7, 0.1]):
+ """Non-monotonic Ishigami-Homma three parameter test function"""
+
+ a = params[0]
+ b = params[1]
+
+ Y = (
+ np.sin(X[:, 0])
+ + a * np.power(np.sin(X[:, 1]), 2)
+ + b * np.power(X[:, 2], 4) * np.sin(X[:, 0])
+ )
+
+ return Y
diff --git a/docs/code/sensitivity/chatterjee/local_sobol_func.py b/docs/code/sensitivity/chatterjee/local_sobol_func.py
new file mode 100644
index 000000000..1ccabc6dd
--- /dev/null
+++ b/docs/code/sensitivity/chatterjee/local_sobol_func.py
@@ -0,0 +1,42 @@
+"""
+
+Auxiliary file
+==============================================
+
+"""
+
+import numpy as np
+import copy
+
+
+def evaluate(X, a_values):
+
+ dims = len(a_values)
+ g = 1
+
+ for i in range(dims):
+ g_i = (np.abs(4 * X[:, i] - 2) + a_values[i]) / (1 + a_values[i])
+ g *= g_i
+
+ return g
+
+
+def sensitivities(a_values):
+
+ dims = len(a_values)
+
+ Total_order = np.zeros((dims, 1))
+
+ V_i = (3 * (1 + a_values) ** 2) ** (-1)
+
+ total_variance = np.prod(1 + V_i) - 1
+
+ First_order = V_i / total_variance
+
+ for i in range(dims):
+
+ rem_First_order = copy.deepcopy(V_i)
+ rem_First_order[i] = 0
+ Total_order[i] = V_i[i] * np.prod(rem_First_order + 1) / total_variance
+
+ return First_order.reshape(-1, 1), Total_order
diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py
new file mode 100644
index 000000000..2922b97af
--- /dev/null
+++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py
@@ -0,0 +1,54 @@
+"""
+
+Exponential function
+==============================================
+
+.. math::
+ f(x) := \exp(x_1 + 2x_2), \quad x_1, x_2 \sim \mathcal{N}(0, 1)
+
+"""
+
+# %%
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Normal
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.chatterjee import Chatterjee
+
+# %%
+# Create Model object
+model = PythonModel(
+ model_script="local_exponential.py",
+ model_object_name="evaluate",
+ var_names=[
+ "X_1",
+ "X_2",
+ ],
+ delete_files=True,
+)
+
+runmodel_obj = RunModel(model=model)
+
+# Define distribution object
+dist_object = JointIndependent([Normal(0, 1)] * 2)
+
+# %% [markdown]
+# Compute Chatterjee indices
+
+# %%
+SA = Chatterjee(runmodel_obj, dist_object)
+
+# Compute Chatterjee indices
+computed_indices = SA.run(n_samples=1_000_000)
+
+# %% [markdown]
+# Chatterjee sensitivity analysis
+#
+# The Chatterjee indices converge to the Cramer-von Mises indices, whose analytical values are:
+#
+# $S^1_{CVM} = \frac{6}{\pi} \operatorname{arctan}(2) - 2 \approx 0.1145$
+#
+# $S^2_{CVM} = \frac{6}{\pi} \operatorname{arctan}(\sqrt{19}) - 2 \approx 0.5693$
+
+# %%
+computed_indices["chatterjee_i"]
diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py
new file mode 100644
index 000000000..66897d670
--- /dev/null
+++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py
@@ -0,0 +1,58 @@
+r"""
+
+Ishigami function
+==============================================
+
+.. math::
+ f(x_1, x_2, x_3) = \sin(x_1) + a \cdot \sin^2(x_2) + b \cdot x_3^4 \sin(x_1)
+
+.. math::
+ x_1, x_2, x_3 \sim \mathcal{U}(-\pi, \pi), \quad a, b\in \mathbb{R}
+
+"""
+
+# %%
+import numpy as np
+
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Uniform
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.chatterjee import Chatterjee
+
+# %%
+# Create Model object
+model = PythonModel(
+ model_script="local_ishigami.py",
+ model_object_name="evaluate",
+ var_names=[r"$X_1$", "$X_2$", "$X_3$"],
+ delete_files=True,
+ params=[7, 0.1],
+)
+
+runmodel_obj = RunModel(model=model)
+
+# Define distribution object
+dist_object = JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3)
+
+# %% [markdown]
+# Compute Chatterjee indices
+
+# %%
+SA = Chatterjee(runmodel_obj, dist_object)
+
+computed_indices = SA.run(
+ n_samples=100_000,
+ estimate_sobol_indices=True,
+ num_bootstrap_samples=100,
+ confidence_level=0.95,
+)
+
+# %%
+computed_indices["chatterjee_i"]
+
+# %%
+computed_indices["CI_chatterjee_i"]
+
+# %%
+computed_indices["sobol_i"]
diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
new file mode 100644
index 000000000..578131426
--- /dev/null
+++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
@@ -0,0 +1,70 @@
+r"""
+
+Sobol function
+==============================================
+
+.. math::
+
+ g(x_1, x_2, \ldots, x_D) := \prod_{i=1}^{D} \frac{|4x_i - 2| + a_i}{1 + a_i},
+
+where,
+
+.. math::
+ x_i \sim \mathcal{U}(0, 1), \quad a_i \in \mathbb{R}.
+
+"""
+
+# %%
+import numpy as np
+
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Uniform
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.chatterjee import Chatterjee
+
+# %%
+# Create Model object
+num_vars = 6
+a_vals = np.array([0.0, 0.5, 3.0, 9.0, 99.0, 99.0])
+
+model = PythonModel(
+ model_script="local_sobol_func.py",
+ model_object_name="evaluate",
+ var_names=["X_" + str(i) for i in range(num_vars)],
+ delete_files=True,
+ a_values=a_vals,
+)
+
+runmodel_obj = RunModel(model=model)
+
+# Define distribution object
+dist_object = JointIndependent([Uniform(0, 1)] * num_vars)
+
+# %% [markdown]
+# Compute Chatterjee indices
+
+# %%
+SA = Chatterjee(runmodel_obj, dist_object)
+
+# Compute Chatterjee indices and estimate first order Sobol indices using rank statistics
+computed_indices = SA.run(n_samples=500_000, estimate_sobol_indices=True)
+
+# %%
+computed_indices["chatterjee_i"]
+
+# %% [markdown]
+# $S_1$ = 5.86781190e-01
+#
+# $S_2$ = 2.60791640e-01
+#
+# $S_3$ = 3.66738244e-02
+#
+# $S_4$ = 5.86781190e-03
+#
+# $S_5$ = 5.86781190e-05
+#
+# $S_6$ = 5.86781190e-05
+
+# %%
+computed_indices["sobol_i"]
From e181e738dfe4c441cb917a67e2b6c2736e7b2f0f Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 23:50:29 +0200
Subject: [PATCH 20/59] Add unit tests for Chatterjee sensitivity
---
.../unit_tests/sensitivity/test_chatterjee.py | 230 ++++++++++++++++++
1 file changed, 230 insertions(+)
create mode 100644 tests/unit_tests/sensitivity/test_chatterjee.py
diff --git a/tests/unit_tests/sensitivity/test_chatterjee.py b/tests/unit_tests/sensitivity/test_chatterjee.py
new file mode 100644
index 000000000..8a7c6495f
--- /dev/null
+++ b/tests/unit_tests/sensitivity/test_chatterjee.py
@@ -0,0 +1,230 @@
+"""
+This is the test module for the Chatterjee sensitivity indices.
+
+Here, we use the exponential function (as in the test module for the
+Cramér-von Mises indices) to test the Chatterjee indices, and the Ishigami
+function (as in the test module for the Sobol indices) to test the
+Sobol estimates.
+
+The following are tested:
+1. Chatterjee indices
+2. Sobol index estimates
+
+Important
+----------
+The computed indices are compared with the analytical values using the `np.isclose` function.
+
+Function signature:
+ numpy.isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False)
+
+ Parameters:
+ a, b: array_like
+ Input arrays to compare.
+
+ rtol: float
+ The relative tolerance parameter.
+
+ atol: float
+ The absolute tolerance parameter.
+
+Each element of the `diff` array is compared as follows:
+diff = |a - b|
+diff <= atol + rtol * abs(b)
+
+- relative tolerance: rtol * abs(b)
+ It is the maximum allowed difference between a and b,
+ relative to the absolute value of b.
+ For example, to set a relative tolerance of 1%, pass rtol=0.01.
+
+- absolute tolerance: atol
+ When b is close to zero, the atol value is used.
+
+"""
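+
+# Illustrative tolerance check (not part of the tests): with rtol=0 and atol=1e-2,
+# as used in the assertions below, two indices are considered equal if they agree
+# to within 0.01, e.g.
+#
+#   np.isclose(0.3139, 0.3102, rtol=0, atol=1e-2)   # True  (difference 0.0037)
+#   np.isclose(0.3139, 0.3250, rtol=0, atol=1e-2)   # False (difference 0.0111)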
+
+import numpy as np
+import pytest
+
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Uniform, Normal
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.chatterjee import Chatterjee
+
+
+# Prepare
+###############################################################################
+
+# Prepare the input distribution
+@pytest.fixture()
+def exponential_input_dist_object():
+ """
+ This function returns the input distribution for the exponential function.
+
+ X1 ~ Normal(0,1)
+ X2 ~ Normal(0,1)
+
+ """
+ return JointIndependent([Normal(0, 1)] * 2)
+
+
+@pytest.fixture()
+def exponential_model_object():
+ """This function creates the exponential run_model_object"""
+ model = PythonModel(
+ model_script="exponential.py",
+ model_object_name="evaluate",
+ var_names=[
+ "X_1",
+ "X_2",
+ ],
+ delete_files=True,
+ )
+
+ runmodel_obj = RunModel(model=model)
+
+ return runmodel_obj
+
+
+@pytest.fixture()
+def Chatterjee_object(exponential_model_object, exponential_input_dist_object):
+ """This function creates the Chatterjee object"""
+ return Chatterjee(exponential_model_object, exponential_input_dist_object)
+
+
+@pytest.fixture()
+def analytical_Chatterjee_indices():
+ """This function returns the analytical Chatterjee indices.
+
+ S1 = (6/np.pi) * np.arctan(2) - 2
+ S2 = (6/np.pi) * np.arctan(np.sqrt(19)) - 2
+
+ print(np.around(S1, 4))
+ print(np.around(S2, 4))
+
+ """
+
+ return np.array([[0.1145], [0.5693]])
+
+
+@pytest.fixture()
+def numerical_Chatterjee_indices(Chatterjee_object):
+ """This function returns the numerical Chatterjee indices."""
+
+ SA = Chatterjee_object
+
+ np.random.seed(12345) #! set seed for reproducibility
+
+ computed_indices = SA.run(n_samples=10_000)
+
+ return computed_indices["chatterjee_i"]
+
+
+@pytest.fixture()
+def ishigami_input_dist_object():
+ """
+ This function returns the input distribution for the Ishigami function.
+
+ X1 ~ Uniform(-pi, pi)
+ X2 ~ Uniform(-pi, pi)
+ X3 ~ Uniform(-pi, pi)
+
+ """
+ return JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3)
+
+
+@pytest.fixture()
+def ishigami_model_object():
+ """This function creates the Ishigami run_model_object"""
+ model = PythonModel(
+ model_script="ishigami.py",
+ model_object_name="evaluate",
+ var_names=[r"$X_1$", "$X_2$", "$X_3$"],
+ delete_files=True,
+ params=[7, 0.1],
+ )
+
+ runmodel_obj = RunModel(model=model)
+
+ return runmodel_obj
+
+
+@pytest.fixture()
+def Chatterjee_object_ishigami(ishigami_model_object, ishigami_input_dist_object):
+ """This function creates the Chatterjee object"""
+ return Chatterjee(ishigami_model_object, ishigami_input_dist_object)
+
+
+@pytest.fixture()
+def numerical_Sobol_indices(Chatterjee_object_ishigami):
+ """This function returns the Sobol indices."""
+
+ SA = Chatterjee_object_ishigami
+
+ np.random.seed(12345)
+
+ computed_indices = SA.run(n_samples=10_000, estimate_sobol_indices=True)
+
+ return computed_indices["sobol_i"]
+
+
+@pytest.fixture()
+def analytical_ishigami_Sobol_indices():
+ """
+ Analytical Sobol indices for the Ishigami function.
+
+ Copy-paste the following to reproduce the given indices:
+
+ a = 7
+ b = 0.1
+
+ V1 = 0.5*(1 + (b*np.pi**4)/5)**2
+ V2 = (a**2)/8
+ V3 = 0
+
+ VT3 = (8*(b**2)*np.pi**8)/225
+ VT1 = V1 + VT3
+ VT2 = V2
+
+ total_variance = V2 + (b*np.pi**4)/5 + ((b**2) * np.pi**8)/18 + 0.5
+
+ S = np.array([V1, V2, V3])/total_variance
+ S_T = np.array([VT1, VT2, VT3])/total_variance
+
+ S = np.around(S, 4)
+ S_T = np.around(S_T, 4)
+
+ """
+
+ S1 = 0.3139
+ S2 = 0.4424
+ S3 = 0
+
+ S_T1 = 0.5576
+ S_T2 = 0.4424
+ S_T3 = 0.2437
+
+ S = np.array([S1, S2, S3])
+ S_T = np.array([S_T1, S_T2, S_T3])
+
+ return S.reshape(-1, 1)
+
+
+# Unit tests
+###############################################################################
+
+
+def test_Chatterjee_estimate(
+ numerical_Chatterjee_indices, analytical_Chatterjee_indices
+):
+ """This function tests the Chatterjee estimate."""
+ assert np.isclose(
+ numerical_Chatterjee_indices, analytical_Chatterjee_indices, rtol=0, atol=1e-2
+ ).all()
+
+
+def test_Sobol_estimate(numerical_Sobol_indices, analytical_ishigami_Sobol_indices):
+ """This function tests the Sobol estimate."""
+ assert np.isclose(
+ numerical_Sobol_indices, analytical_ishigami_Sobol_indices, rtol=0, atol=1e-2
+ ).all()
From 1e6118f00155192508aedf0e12cf0df126cff5d4 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 9 May 2022 03:23:33 +0200
Subject: [PATCH 21/59] Added generalised sobol sensitivity
---
src/UQpy/sensitivity/__init__.py | 2 +
src/UQpy/sensitivity/generalised_sobol.py | 378 ++++++++++++++++++++++
2 files changed, 380 insertions(+)
create mode 100644 src/UQpy/sensitivity/generalised_sobol.py
diff --git a/src/UQpy/sensitivity/__init__.py b/src/UQpy/sensitivity/__init__.py
index 73c4166da..e25335a34 100644
--- a/src/UQpy/sensitivity/__init__.py
+++ b/src/UQpy/sensitivity/__init__.py
@@ -3,9 +3,11 @@
from UQpy.sensitivity.sobol import Sobol
from UQpy.sensitivity.cramer_von_mises import CramervonMises
from UQpy.sensitivity.chatterjee import Chatterjee
+from UQpy.sensitivity.generalised_sobol import GeneralisedSobol
from . import MorrisSensitivity
from . import PceSensitivity
from . import Sobol
from . import CramervonMises
from . import Chatterjee
+from . import GeneralisedSobol
diff --git a/src/UQpy/sensitivity/generalised_sobol.py b/src/UQpy/sensitivity/generalised_sobol.py
new file mode 100644
index 000000000..e5cf2f654
--- /dev/null
+++ b/src/UQpy/sensitivity/generalised_sobol.py
@@ -0,0 +1,378 @@
+"""
+
+The GeneralisedSobol class computes the generalised Sobol indices for a given
+multi-output model. The class is based on the work of [1]_ and [2]_.
+
+Additionally, we can compute the confidence intervals for the Sobol indices
+using bootstrapping [3]_.
+
+References
+----------
+
+ .. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others.
+ Sensitivity analysis for multidimensional and functional outputs.
+ Electronic journal of statistics 2014; 8(1): 575-603.
+
+ .. [2] Alexanderian A, Gremaud PA, Smith RC. Variance-based sensitivity
+ analysis for time-dependent processes. Reliability engineering
+ & system safety 2020; 196: 106722.
+
+ .. [3] Jeremy Orloff and Jonathan Bloom (2014), Bootstrap confidence intervals,
+ Introduction to Probability and Statistics, MIT OCW.
+
+"""
+
+import logging
+
+import numpy as np
+
+from UQpy.sensitivity.baseclass.sensitivity import Sensitivity
+from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples
+from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter
+
+
+class GeneralisedSobol(Sensitivity):
+ """
+ Compute the generalised Sobol indices for models with multiple outputs
+ (vector-valued response) using the Pick-and-Freeze method.
+
+ :param runmodel_object: The computational model. It should be of type :class:`.RunModel`. \
+ The output QoI can be a scalar or a vector of length :code:`ny`; the generalised \
+ indices summarise the sensitivity of the full vector-valued output jointly.
+
+ :param dist_object: List of :class:`.Distribution` objects corresponding to each \
+ random variable, or :class:`.JointIndependent` object \
+ (multivariate RV with independent marginals).
+
+ :param random_state: Random seed used to initialize the pseudo-random number \
+ generator. Default is :any:`None`.
+
+ **Methods:**
+ """
+
+ def __init__(
+ self, runmodel_object, dist_object, random_state=None, **kwargs
+ ) -> None:
+
+ super().__init__(runmodel_object, dist_object, random_state, **kwargs)
+
+ # Create logger with the same name as the class
+ self.logger = logging.getLogger(__name__)
+ self.logger.setLevel(logging.ERROR)
+ frmt = UQpyLoggingFormatter()
+
+ # create console handler with a higher log level
+ ch = logging.StreamHandler()
+ ch.setFormatter(frmt)
+
+ # add the handler to the logger
+ self.logger.addHandler(ch)
+
+ self.gen_sobol_i = None
+ "Generalised first order Sobol indices, :class:`ndarray` of shape (num_vars, 1)"
+
+ self.gen_sobol_total_i = None
+ "Generalised total order Sobol indices, :class:`ndarray` of shape (num_vars, 1)"
+
+ self.n_samples = None
+ "Number of samples used to compute the sensitivity indices, :class:`int`"
+
+ self.num_vars = None
+ "Number of model input variables, :class:`int`"
+
+ def run(
+ self,
+ n_samples=1_000,
+ num_bootstrap_samples=None,
+ confidence_level=0.95,
+ ):
+
+ """
+ Compute the generalised Sobol indices for models with multiple outputs
+ (vector-valued response) using the Pick-and-Freeze method.
+
+ :param n_samples: Number of samples used to compute the sensitivity indices. \
+ Default is 1,000.
+
+ :param num_bootstrap_samples: Number of bootstrap samples used to compute the \
+ confidence intervals. Default is :any:`None`.
+
+ :param confidence_level: Confidence level used to compute the confidence \
+ intervals. Default is 0.95.
+
+ :return: A :class:`dict` with the following keys: \
+ :code:`gen_sobol_i` of shape :code:`(num_vars, 1)`, \
+ :code:`gen_sobol_total_i` of shape :code:`(num_vars, 1)`, \
+ :code:`CI_gen_sobol_i` of shape :code:`(num_vars, 2)`, \
+ :code:`CI_gen_sobol_total_i` of shape :code:`(num_vars, 2)`.
+
+ """
+
+ # Check n_samples data type
+ self.n_samples = n_samples
+ if not isinstance(self.n_samples, int):
+ raise TypeError("UQpy: n_samples should be an integer")
+
+ # Check num_bootstrap_samples data type
+ if num_bootstrap_samples is not None:
+ if not isinstance(num_bootstrap_samples, int):
+ raise TypeError("UQpy: num_bootstrap_samples should be an integer.\n")
+ elif num_bootstrap_samples is None:
+ self.logger.info(
+ "UQpy: num_bootstrap_samples is set to None, confidence intervals will not be computed.\n"
+ )
+
+ ################## GENERATE SAMPLES ##################
+
+ (A_samples, B_samples, C_i_generator, _,) = generate_pick_freeze_samples(
+ self.dist_object, self.n_samples, self.random_state
+ )
+
+ self.logger.info("UQpy: Generated samples using the pick-freeze scheme.\n")
+
+ self.num_vars = A_samples.shape[1] # Number of variables
+
+ ################# MODEL EVALUATIONS ####################
+
+ A_model_evals = self._run_model(A_samples) # shape: (n_samples, n_outputs)
+
+ # if the model output is vectorised,
+ # the shape returned by the model is (n_samples, n_outputs, 1),
+ # so we reshape it to (n_samples, n_outputs)
+ if A_model_evals.ndim == 3:
+ A_model_evals = A_model_evals[:, :, 0] # shape: (n_samples, n_outputs)
+
+ self.logger.info("UQpy: Model evaluations A completed.\n")
+
+ B_model_evals = self._run_model(B_samples) # shape: (n_samples, n_outputs)
+
+ # if the model output is vectorised,
+ # the shape returned by the model is (n_samples, n_outputs, 1),
+ # so we reshape it to (n_samples, n_outputs)
+ if B_model_evals.ndim == 3:
+ B_model_evals = B_model_evals[:, :, 0] # shape: (n_samples, n_outputs)
+
+ self.logger.info("UQpy: Model evaluations B completed.\n")
+
+ self.n_outputs = A_model_evals.shape[1]
+
+ # shape: (n_outputs, n_samples, num_vars)
+ C_i_model_evals = np.zeros((self.n_outputs, self.n_samples, self.num_vars))
+
+ for i, C_i in enumerate(C_i_generator):
+
+ # if the model output is vectorised,
+ # the shape returned by the model is (n_samples, n_outputs, 1),
+ # so we reshape it to (n_samples, n_outputs)
+ model_evals = self._run_model(C_i)
+
+ if model_evals.ndim == 3:
+ C_i_model_evals[:, :, i] = model_evals[:, :, 0].T  # reuse the evaluations above
+ else:
+ C_i_model_evals[:, :, i] = model_evals.T
+
+ self.logger.info("UQpy: Model evaluations C completed.\n")
+
+ self.logger.info("UQpy: All model evaluations computed successfully.\n")
+
+ ######################### STORAGE ########################
+
+ # Create dictionary to store the sensitivity indices
+ computed_indices = {}
+
+ ################## COMPUTE GENERALISED SOBOL INDICES ##################
+
+ self.gen_sobol_i = self.compute_first_order_generalised_sobol_indices(
+ A_model_evals, B_model_evals, C_i_model_evals
+ )
+
+ self.logger.info(
+ "UQpy: First order Generalised Sobol indices computed successfully.\n"
+ )
+
+ self.gen_sobol_total_i = self.compute_total_order_generalised_sobol_indices(
+ A_model_evals, B_model_evals, C_i_model_evals
+ )
+
+ self.logger.info(
+ "UQpy: Total order Generalised Sobol indices computed successfully.\n"
+ )
+
+ # Store the indices in the dictionary
+ computed_indices["gen_sobol_i"] = self.gen_sobol_i
+ computed_indices["gen_sobol_total_i"] = self.gen_sobol_total_i
+
+ ################## CONFIDENCE INTERVALS ####################
+
+ if num_bootstrap_samples is not None:
+
+ self.logger.info("UQpy: Computing confidence intervals ...\n")
+
+ estimator_inputs = [
+ A_model_evals,
+ B_model_evals,
+ C_i_model_evals,
+ ]
+
+ # First order generalised Sobol indices
+ self.CI_gen_sobol_i = self.bootstrapping(
+ self.compute_first_order_generalised_sobol_indices,
+ estimator_inputs,
+ computed_indices["gen_sobol_i"],
+ num_bootstrap_samples,
+ confidence_level,
+ )
+
+ self.logger.info(
+ "UQpy: Confidence intervals for First order Generalised Sobol indices computed successfully.\n"
+ )
+
+ # Total order generalised Sobol indices
+ self.CI_gen_sobol_total_i = self.bootstrapping(
+ self.compute_total_order_generalised_sobol_indices,
+ estimator_inputs,
+ computed_indices["gen_sobol_total_i"],
+ num_bootstrap_samples,
+ confidence_level,
+ )
+
+ self.logger.info(
+ "UQpy: Confidence intervals for Total order Generalised Sobol indices computed successfully.\n"
+ )
+
+ # Store the indices in the dictionary
+ computed_indices["CI_gen_sobol_i"] = self.CI_gen_sobol_i
+ computed_indices["CI_gen_sobol_total_i"] = self.CI_gen_sobol_total_i
+
+ return computed_indices
+
+ @staticmethod
+ def compute_first_order_generalised_sobol_indices(
+ A_model_evals, B_model_evals, C_i_model_evals
+ ):
+
+ """
+ Compute the first order generalised Sobol indices for models with multiple outputs.
+
+ :param A_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_samples, n_outputs)`.
+ :param B_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_samples, n_outputs)`.
+ :param C_i_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_outputs, n_samples, num_vars)`.
+
+ :return: First order generalised Sobol indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`.
+
+ """
+
+ num_vars = C_i_model_evals.shape[2]
+ n_outputs = A_model_evals.shape[1]
+
+ # store generalised Sobol indices
+ gen_sobol_i = np.zeros((num_vars, 1))
+
+ for i in range(num_vars):
+
+ all_Y_i = A_model_evals.T # shape: (n_outputs, n_samples)
+ all_Y_i_tilde = B_model_evals.T # shape: (n_outputs, n_samples)
+ all_Y_i_u = C_i_model_evals[:, :, i] # shape: (n_outputs, n_samples)
+
+ # compute the mean using all model evaluations
+ # shape: (n_outputs, 1)
+ mean = (
+ np.mean(all_Y_i, axis=1, keepdims=1)
+ + np.mean(all_Y_i_u, axis=1, keepdims=1)
+ + np.mean(all_Y_i_tilde, axis=1, keepdims=1)
+ ) / 3
+
+ # center the evaluations since mean is available
+ all_Y_i = all_Y_i - mean
+ all_Y_i_tilde = all_Y_i_tilde - mean
+ all_Y_i_u = all_Y_i_u - mean
+
+ # compute the variance matrix using all available model evaluations
+ # shape: (n_outputs, n_outputs)
+ C = (np.cov(all_Y_i) + np.cov(all_Y_i_u) + np.cov(all_Y_i_tilde)) / 3
+
+ # compute covariance btw. RVs 'X' and 'Y'
+ # shape: (2*n_outputs, 2*n_outputs)
+ # It contains the following 4 block matrices:
+ # (1, 1) variance of 'X'
+ # *(1, 2) covariance between 'X' and 'Y' (a.k.a. cross-covariance)
+ # (2, 1) covariance between 'Y' and 'X' (a.k.a. cross-covariance)
+ # (2, 2) variance of 'Y'
+ _cov_1 = np.cov(all_Y_i_u, all_Y_i) # for first order indices
+
+ # We need the cross-covariance between 'X' and 'Y'
+ # Extract *(1, 2) (upper right block)
+ # shape: (n_outputs, n_outputs)
+ C_u = _cov_1[0:n_outputs, n_outputs : 2 * n_outputs]
+
+ denominator = np.trace(C)
+
+ # Generalised Sobol indices
+ gen_sobol_i[i] = np.trace(C_u) / denominator
+
+ return gen_sobol_i
+
+ @staticmethod
+ def compute_total_order_generalised_sobol_indices(
+ A_model_evals, B_model_evals, C_i_model_evals
+ ):
+
+ """
+ Compute the total order generalised Sobol indices for models with multiple outputs.
+
+ :param A_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_samples, n_outputs)`.
+ :param B_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_samples, n_outputs)`.
+ :param C_i_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_outputs, n_samples, num_vars)`.
+
+ :return: Total order generalised Sobol indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`.
+
+ """
+
+ num_vars = C_i_model_evals.shape[2]
+ n_outputs = A_model_evals.shape[1]
+
+ # store generalised Sobol indices
+ gen_sobol_total_i = np.zeros((num_vars, 1))
+
+ for i in range(num_vars):
+
+ all_Y_i = A_model_evals.T # shape: (n_outputs, n_samples)
+ all_Y_i_tilde = B_model_evals.T # shape: (n_outputs, n_samples)
+ all_Y_i_u = C_i_model_evals[:, :, i] # shape: (n_outputs, n_samples)
+
+ # compute the mean using all model evaluations
+ # shape: (n_outputs, 1)
+ mean = (
+ np.mean(all_Y_i, axis=1, keepdims=1)
+ + np.mean(all_Y_i_u, axis=1, keepdims=1)
+ + np.mean(all_Y_i_tilde, axis=1, keepdims=1)
+ ) / 3
+
+ # center the evaluations since mean is available
+ all_Y_i = all_Y_i - mean
+ all_Y_i_tilde = all_Y_i_tilde - mean
+ all_Y_i_u = all_Y_i_u - mean
+
+ # compute the variance matrix using all available model evaluations
+ # shape: (n_outputs, n_outputs)
+ C = (np.cov(all_Y_i) + np.cov(all_Y_i_u) + np.cov(all_Y_i_tilde)) / 3
+
+ # compute covariance btw. RVs 'X' and 'Y'
+ # shape: (2*n_outputs, 2*n_outputs)
+ # It contains the following 4 block matrices:
+ # (1, 1) variance of 'X'
+ # *(1, 2) covariance between 'X' and 'Y' (a.k.a. cross-covariance)
+ # (2, 1) covariance between 'Y' and 'X' (a.k.a. cross-covariance)
+ # (2, 2) variance of 'Y'
+ _cov_2 = np.cov(all_Y_i_u, all_Y_i_tilde) # for total order indices
+
+ # We need the cross-covariance between 'X' and 'Y'
+ # Extract *(1, 2) (upper right block)
+ # shape: (n_outputs, n_outputs)
+ C_u_tilde = _cov_2[0:n_outputs, n_outputs : 2 * n_outputs]
+ denominator = np.trace(C)
+
+ # Generalised Sobol indices
+ gen_sobol_total_i[i] = 1 - np.trace(C_u_tilde) / denominator
+
+ return gen_sobol_total_i
From 3da45cf5af9360c8dfb7756500000269d0cdd1f2 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 9 May 2022 03:24:18 +0200
Subject: [PATCH 22/59] Added documentation for generalised sobol indices
---
.../sensitivity/generalised_sobol/README.rst | 12 ++++
docs/source/conf.py | 2 +
docs/source/sensitivity/generalised_sobol.rst | 67 +++++++++++++++++++
docs/source/sensitivity/index.rst | 4 +-
docs/source/sensitivity/sobol.rst | 12 ++++
5 files changed, 96 insertions(+), 1 deletion(-)
create mode 100644 docs/code/sensitivity/generalised_sobol/README.rst
create mode 100644 docs/source/sensitivity/generalised_sobol.rst
diff --git a/docs/code/sensitivity/generalised_sobol/README.rst b/docs/code/sensitivity/generalised_sobol/README.rst
new file mode 100644
index 000000000..88a5bec5e
--- /dev/null
+++ b/docs/code/sensitivity/generalised_sobol/README.rst
@@ -0,0 +1,12 @@
+Generalised Sobol Sensitivity indices
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+We demonstrate the computation of the generalised Sobol indices (GSI) for the following examples with multiple outputs:
+
+1. Mechanical oscillator (analytical solution): Example from [1] page 2
+2. Mechanical oscillator ODE (numerical solution): Example from [2] page 19
+3. Toy example (analytical solution): Example from [2]
+
+.. [1] Alexanderian, Alen, Gremaud, Pierre A and Smith, Ralph C. Variance-based sensitivity analysis for time-dependent processes.
+
+.. [2] Gamboa F, Janon A, Klein T, Lagnoux A, others. Sensitivity analysis for multidimensional and functional outputs. Electronic journal of statistics 2014; 8(1): 575-603.
\ No newline at end of file
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 7d113e439..68538001d 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -94,6 +94,7 @@
"../code/sensitivity/sobol",
"../code/sensitivity/cramer_von_mises",
"../code/sensitivity/chatterjee",
+ "../code/sensitivity/generalised_sobol",
"../code/stochastic_processes/bispectral",
"../code/stochastic_processes/karhunen_loeve",
"../code/stochastic_processes/spectral",
@@ -131,6 +132,7 @@
"auto_examples/sensitivity/sobol",
"auto_examples/sensitivity/cramer_von_mises",
"auto_examples/sensitivity/chatterjee",
+ "auto_examples/sensitivity/generalised_sobol",
"auto_examples/stochastic_processes/bispectral",
"auto_examples/stochastic_processes/karhunen_loeve",
"auto_examples/stochastic_processes/spectral",
diff --git a/docs/source/sensitivity/generalised_sobol.rst b/docs/source/sensitivity/generalised_sobol.rst
new file mode 100644
index 000000000..3515a744b
--- /dev/null
+++ b/docs/source/sensitivity/generalised_sobol.rst
@@ -0,0 +1,67 @@
+Generalised Sobol indices
+----------------------------------------
+
+The generalised Sobol indices are a natural generalization of the Sobol indices (classically defined for single-output models) to multi-output models. They are computed using the Pick-and-Freeze approach (for implementation details, see also [1]_).
+
+Consider a model :math:`Y=f(X): \mathbb{R}^d \rightarrow \mathbb{R}^k` with :math:`d` inputs :math:`X=\left[ X_{1}, X_{2}, \ldots, X_{d} \right]` and :math:`k` outputs :math:`Y=\left[ Y_{1}, Y_{2}, \ldots, Y_{k} \right]`.
+
+As the inputs :math:`X_{1}, \ldots, X_{d}` are independent, :math:`f` may be decomposed through the so-called Hoeffding decomposition:
+
+.. math::
+ f(X) = c + f_{\mathbf{u}}\left(X_{\mathbf{u}}\right)+f_{\sim \mathbf{u}}\left(X_{\sim \mathbf{u}}\right) + f_{\mathbf{u}, \sim \mathbf{u}}\left(X_{\mathbf{u}}, X_{\sim \mathbf{u}}\right)
+
+where :math:`c \in \mathbb{R}^{k}, f_{\mathbf{u}}: E_{\mathbf{u}} \rightarrow \mathbb{R}^{k}, f_{\sim \mathbf{u}}: E_{\sim \mathbf{u}} \rightarrow \mathbb{R}^{k}` and :math:`f_{\mathbf{u}, \sim \mathbf{u}}: E \rightarrow \mathbb{R}^{k}` are given by
+:math:`c=\mathbb{E}(Y), f_{\mathbf{u}}=\mathbb{E}\left(Y \mid X_{\mathbf{u}}\right)-c, f_{\sim \mathbf{u}}=\mathbb{E}\left(Y \mid X_{\sim \mathbf{u}}\right)-c, f_{u, \sim \mathbf{u}}=Y-f_{\mathbf{u}}-f_{\sim \mathbf{u}}-c`
+
+Thanks to :math:`L^{2}`-orthogonality, computing the covariance matrix of both sides of the above equation leads to
+
+.. math::
+ \Sigma = C_{\mathbf{u}}+C_{\sim \mathbf{u}}+C_{\mathbf{u}, \sim \mathbf{u}}.
+
+Here, :math:`\Sigma, C_{\mathbf{u}}, C_{\sim \mathbf{u}}` and :math:`C_{\mathbf{u}, \sim \mathbf{u}}` denote, respectively, the covariance matrices of :math:`Y, f_{\mathbf{u}}\left(X_{\mathbf{u}}\right), f_{\sim \mathbf{u}}\left(X_{\sim \mathbf{u}}\right)` and :math:`f_{\mathbf{u}, \sim \mathbf{u}}\left(X_{\mathbf{u}}, X_{\sim \mathbf{u}}\right)`.
+
+The first order generalised Sobol indices can be computed using the Pick-and-Freeze approach as follows, where the subset :math:`\mathbf{u}` is taken to be a single input variable :math:`i` of the independent random variables.
+
+.. math::
+ S_{i, N}=\frac{\operatorname{Tr}\left(C_{i, N}\right)}{\operatorname{Tr}\left(\Sigma_{N}\right)}
+
+where :math:`C_{\mathbf{i}, N}` and :math:`\Sigma_{N}` are the empirical estimators of :math:`C_{\mathbf{i}}=\operatorname{Cov}\left(Y, Y^{\mathbf{i}}\right)` and :math:`\Sigma=\mathbb{V}[Y]` defined by
+
+.. math::
+ C_{\mathbf{i}, N}=\frac{1}{N} \sum_{j=1}^{N} Y_{j}^{\mathbf{i}} Y_{j}^{t}-\left(\frac{1}{N} \sum_{j=1}^{N} \frac{Y_{j}+Y_{j}^{\mathbf{i}}}{2}\right)\left(\frac{1}{N} \sum_{j=1}^{N} \frac{Y_{j}+Y_{j}^{\mathbf{i}}}{2}\right)^{t}
+
+and
+
+.. math::
+ \Sigma_{N}=\frac{1}{N} \sum_{j=1}^{N} \frac{Y_{j} Y_{j}^{t}+Y_{j}^{\mathbf{i}}\left(Y_{j}^{\mathbf{i}}\right)^{t}}{2}-\left(\frac{1}{N} \sum_{j=1}^{N} \frac{Y_{j}+Y_{j}^{\mathbf{i}}}{2}\right)\left(\frac{1}{N} \sum_{j=1}^{N} \frac{Y_{j}+Y_{j}^{\mathbf{i}}}{2}\right)^{t}
+
+
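+In practice, both quantities are assembled from the Pick-and-Freeze model evaluations. The
+following NumPy sketch (an illustration of the estimator above with placeholder arrays
+standing in for actual model evaluations, not the implementation itself) shows how the
+trace ratio is formed for a single input :math:`i`:
+
+.. code-block:: python
+
+    import numpy as np
+
+    rng = np.random.default_rng(0)
+    N, k = 10_000, 2                              # number of samples and outputs
+    Y = rng.standard_normal((k, N))               # evaluations on sample set A (placeholder)
+    Y_i = 0.5 * Y + rng.standard_normal((k, N))   # evaluations on the frozen set C_i (placeholder)
+
+    # np.cov stacks the two sets; the upper-right block is the empirical
+    # cross-covariance Cov(Y^i, Y) appearing in the numerator.
+    cov_blocks = np.cov(Y_i, Y)
+    C_i_N = cov_blocks[:k, k:]
+
+    Sigma_N = (np.cov(Y) + np.cov(Y_i)) / 2       # pooled empirical covariance of the output
+
+    S_i_N = np.trace(C_i_N) / np.trace(Sigma_N)   # first order generalised Sobol index
+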
+.. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others. Sensitivity analysis for multidimensional and functional outputs. Electronic journal of statistics 2014; 8(1): 575-603.(`Link `_)
+
+
+Generalised Sobol Class
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The :class:`GeneralisedSobol` class is imported using the following command:
+
+>>> from UQpy.sensitivity.generalised_sobol import GeneralisedSobol
+
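+A minimal usage sketch (assuming a :class:`.RunModel` object ``runmodel_obj`` and a
+:class:`.JointIndependent` distribution ``dist_object`` have already been defined, as in
+the examples below):
+
+>>> SA = GeneralisedSobol(runmodel_obj, dist_object)
+>>> computed_indices = SA.run(n_samples=50_000, num_bootstrap_samples=100)
+>>> computed_indices["gen_sobol_i"]       # first order generalised Sobol indices
+>>> computed_indices["CI_gen_sobol_i"]    # bootstrap confidence intervals
+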
+Methods
+"""""""
+
+.. autoclass:: UQpy.sensitivity.GeneralisedSobol
+ :members: run
+
+Attributes
+""""""""""
+.. autoattribute:: UQpy.sensitivity.GeneralisedSobol.gen_sobol_i
+.. autoattribute:: UQpy.sensitivity.GeneralisedSobol.gen_sobol_total_i
+.. autoattribute:: UQpy.sensitivity.GeneralisedSobol.n_samples
+.. autoattribute:: UQpy.sensitivity.GeneralisedSobol.num_vars
+
+Examples
+""""""""""
+
+.. toctree::
+
+ Generalised Sobol Examples <../auto_examples/sensitivity/generalised_sobol/index>
\ No newline at end of file
diff --git a/docs/source/sensitivity/index.rst b/docs/source/sensitivity/index.rst
index e663eed66..1b2a8367d 100644
--- a/docs/source/sensitivity/index.rst
+++ b/docs/source/sensitivity/index.rst
@@ -5,8 +5,9 @@ This module contains functionality for all the sampling methods supported in :py
The module currently contains the following classes:
-- :py:class:`.CramervonMises`: Class to compute Chatterjee sensitivity indices.
+- :py:class:`.Chatterjee`: Class to compute Chatterjee sensitivity indices.
- :py:class:`.CramervonMises`: Class to compute Cramér-von Mises sensitivity indices.
+- :py:class:`.GeneralisedSobol`: Class to compute Generalised Sobol sensitivity indices.
- :py:class:`.MorrisSensitivity`: Class to perform Morris.
- :py:class:`.PceSensitivity`: Class to compute the sensitivity indices using the :class:`.PolynomialChaosExpansion` method.
- :py:class:`.Sobol`: Class to compute Sobol sensitivity indices.
@@ -21,6 +22,7 @@ Sensitivity analysis comprises techniques focused on determining how the variati
Chatterjee
Cramér-von Mises Sensitivity
+ Generalised Sobol Sensitivity
Morris Sensitivity
Polynomial Chaos Sensitivity
Sobol Sensitivity
diff --git a/docs/source/sensitivity/sobol.rst b/docs/source/sensitivity/sobol.rst
index 462db15fd..fa4d49265 100644
--- a/docs/source/sensitivity/sobol.rst
+++ b/docs/source/sensitivity/sobol.rst
@@ -76,6 +76,18 @@ Methods
.. autoclass:: UQpy.sensitivity.Sobol
:members: run
+Attributes
+""""""""""
+.. autoattribute:: UQpy.sensitivity.Sobol.sobol_i
+.. autoattribute:: UQpy.sensitivity.Sobol.sobol_total_i
+.. autoattribute:: UQpy.sensitivity.Sobol.CI_sobol_i
+.. autoattribute:: UQpy.sensitivity.Sobol.CI_sobol_total_i
+.. autoattribute:: UQpy.sensitivity.Sobol.CI_sobol_ij
+.. autoattribute:: UQpy.sensitivity.Sobol.n_samples
+.. autoattribute:: UQpy.sensitivity.Sobol.num_vars
+.. autoattribute:: UQpy.sensitivity.Sobol.multioutput
+
+
Examples
""""""""""
From 29b47271e500ed76dfbab00156831536957be1d0 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 9 May 2022 03:24:53 +0200
Subject: [PATCH 23/59] Add unit tests for generalised sobol sensitivity
---
tests/unit_tests/sensitivity/multioutput.py | 42 +++
.../sensitivity/test_generalised_sobol.py | 315 ++++++++++++++++++
2 files changed, 357 insertions(+)
create mode 100644 tests/unit_tests/sensitivity/multioutput.py
create mode 100644 tests/unit_tests/sensitivity/test_generalised_sobol.py
diff --git a/tests/unit_tests/sensitivity/multioutput.py b/tests/unit_tests/sensitivity/multioutput.py
new file mode 100644
index 000000000..6974d37c0
--- /dev/null
+++ b/tests/unit_tests/sensitivity/multioutput.py
@@ -0,0 +1,42 @@
+"""
+This is the toy example with multiple outputs from [1]_.
+
+References
+----------
+
+.. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others.
+ Sensitivity analysis for multidimensional and functional outputs.
+ Electronic journal of statistics 2014; 8(1): 575-603.
+
+"""
+
+import numpy as np
+
+
+def evaluate(X):
+
+ """
+
+ * **Input:**
+
+ * **X** (`ndarray`):
+ Samples from the input distribution.
+ Shape: (n_samples, 2)
+
+ * **Output:**
+
+ * **Y** (`ndarray`):
+ Model evaluations.
+ Shape: (2, n_samples)
+
+ """
+
+ n_samples = X.shape[0]
+
+ output = np.zeros((2, n_samples))
+
+ output[0, :] = X[:, 0] + X[:, 1] + X[:, 0] * X[:, 1]
+
+ output[1, :] = 2 * X[:, 0] + X[:, 1] + 3 * X[:, 0] * X[:, 1]
+
+ return output
diff --git a/tests/unit_tests/sensitivity/test_generalised_sobol.py b/tests/unit_tests/sensitivity/test_generalised_sobol.py
new file mode 100644
index 000000000..c759d85bb
--- /dev/null
+++ b/tests/unit_tests/sensitivity/test_generalised_sobol.py
@@ -0,0 +1,315 @@
+"""
+This is the test module for the Generalised Sobol indices.
+
+Here, we will use the toy example from [1]_, which is a multi-output problem.
+
+
+References
+----------
+
+.. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others.
+ Sensitivity analysis for multidimensional and functional outputs.
+ Electronic journal of statistics 2014; 8(1): 575-603.
+
+Important
+----------
+The computed indices are compared with their expected values using the `np.isclose` function.
+
+Function signature:
+ numpy.isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False)
+
+ Parameters:
+ a, b: array_like
+ Input arrays to compare.
+
+ rtol: float
+ The relative tolerance parameter.
+
+ atol: float
+ The absolute tolerance parameter.
+
+Each element of the `diff` array is compared as follows:
+diff = |a - b|
+diff <= atol + rtol * abs(b)
+
+- relative tolerance: rtol * abs(b)
+ It is the maximum allowed difference between a and b,
+ relative to the absolute value of b.
+ For example, to set a relative tolerance of 1%, pass rtol=0.01.
+- absolute tolerance: atol
+ When b is close to zero, the atol value is used.
+
+"""
+
+import numpy as np
+import pytest
+import scipy
+
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Uniform, Normal
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.generalised_sobol import GeneralisedSobol
+
+# Prepare
+###############################################################################
+
+# Prepare the input distribution
+@pytest.fixture()
+def normal_input_dist_object():
+ """
+ This function returns the input distribution for the toy model.
+
+ X1 ~ Normal(0, 1)
+ X2 ~ Normal(0, 1)
+
+ """
+ return JointIndependent([Normal(0, 1)] * 2)
+
+
+@pytest.fixture()
+def uniform_input_dist_object():
+ """
+ This function returns the input distribution for the toy model.
+
+ X1 ~ Uniform(0, 1)
+ X2 ~ Uniform(0, 1)
+
+ """
+ return JointIndependent([Uniform(0, 1)] * 2)
+
+
+@pytest.fixture()
+def toy_model_object():
+ """
+ This function creates the toy model.
+
+ """
+ model = PythonModel(
+ model_script="multioutput.py",
+ model_object_name="evaluate",
+ var_names=[
+ "X_1",
+ "X_2",
+ ],
+ delete_files=True,
+ )
+
+ runmodel_obj = RunModel(model=model)
+
+ return runmodel_obj
+
+
+@pytest.fixture()
+def generalised_sobol_object_normal(normal_input_dist_object, toy_model_object):
+ """
+ This function creates the Generalised Sobol indices object
+ with normal input distribution.
+
+ """
+
+ return GeneralisedSobol(toy_model_object, normal_input_dist_object)
+
+
+@pytest.fixture()
+def generalised_sobol_object_uniform(uniform_input_dist_object, toy_model_object):
+ """
+ This function creates the Generalised Sobol indices object
+ with uniform input distribution.
+
+ """
+
+ return GeneralisedSobol(toy_model_object, uniform_input_dist_object)
+
+
+@pytest.fixture()
+def analytical_toy_GSI_normal():
+ """
+ Analytical first order Generalised Sobol indices
+ for the toy example with normal input distribution.
+ """
+
+ return np.array([0.2941, 0.1176]).reshape(-1, 1)
+
+
+@pytest.fixture()
+def analytical_toy_GSI_uniform():
+ """
+ Analytical first order Generalised Sobol indices
+ for the toy example with uniform input distribution.
+ """
+
+ return np.array([0.6084, 0.3566]).reshape(-1, 1)
+
+
+@pytest.fixture()
+def pick_and_freeze_toy_GSI_normal(generalised_sobol_object_normal):
+ """
+ Generalised first order Sobol indices computed using the Pick and Freeze
+ approach for the toy example with normal input distribution.
+ """
+
+ SA = generalised_sobol_object_normal
+
+ np.random.seed(12345) #! set seed for reproducibility
+
+ computed_indices = SA.run(n_samples=100_000)
+
+ return computed_indices["gen_sobol_i"]
+
+
+@pytest.fixture()
+def pick_and_freeze_toy_GSI_uniform(generalised_sobol_object_uniform):
+ """
+ Generalised first order Sobol indices computed using the Pick and Freeze
+ approach for the toy example with uniform input distribution.
+ """
+
+ SA = generalised_sobol_object_uniform
+
+ np.random.seed(12345) #! set seed for reproducibility
+
+ computed_indices = SA.run(n_samples=100_000)
+
+ return computed_indices["gen_sobol_i"]
+
+
+@pytest.fixture()
+def NUM_SAMPLES():
+ """This function returns the number of samples for bootstrapping"""
+
+ num_bootstrap_samples = 500
+ num_samples = 20_000
+
+ return num_bootstrap_samples, num_samples
+
+
+@pytest.fixture()
+def bootstrap_generalised_sobol_index_variance(
+ generalised_sobol_object_normal, NUM_SAMPLES
+):
+
+ SA = generalised_sobol_object_normal
+
+ np.random.seed(12345) #! set seed for reproducibility
+
+ num_bootstrap_samples, n_samples = NUM_SAMPLES
+
+ confidence_level = 0.95
+ delta = -scipy.stats.norm.ppf((1 - confidence_level) / 2)
+
+ # Compute the confidence intervals
+
+ computed_indices = SA.run(
+ n_samples=n_samples,
+ num_bootstrap_samples=num_bootstrap_samples,
+ confidence_level=confidence_level,
+ )
+
+ gen_sobol_i = computed_indices["gen_sobol_i"].ravel()
+ gen_sobol_total_i = computed_indices["gen_sobol_total_i"].ravel()
+ upper_bound_first_order = computed_indices["CI_gen_sobol_i"][:, 1]
+ upper_bound_total_order = computed_indices["CI_gen_sobol_total_i"][:, 1]
+
+ std_bootstrap_first_order = (upper_bound_first_order - gen_sobol_i) / delta
+ std_bootstrap_total_order = (upper_bound_total_order - gen_sobol_total_i) / delta
+
+ return std_bootstrap_first_order**2, std_bootstrap_total_order**2
+
+
+@pytest.fixture()
+def model_eval_generalised_sobol_index_variance():
+
+ """
+ For computational efficiency, the variance of the generalised Sobol indices
+ is precomputed using model evaluations with
+ NUM_SAMPLES (num_repetitions=500, num_samples=20_000)
+
+ Copy-paste the following code to generate the variance
+ of the Sobol indices:
+
+ model = PythonModel(model_script='multioutput.py',
+ model_object_name='evaluate',
+ var_names=['X_1', 'X_2'], delete_files=True)
+ runmodel_obj = RunModel(model=model)
+
+ dist_object_1 = JointIndependent([Normal(0, 1)]*2)
+
+ SA = GeneralisedSobol(runmodel_obj, dist_object_1)
+
+ np.random.seed(12345) # for reproducibility
+
+ num_repetitions, n_samples = 500, 20_000
+
+ num_vars = 2
+
+ bootstrap_first_order = np.zeros((num_vars, num_repetitions))
+ bootstrap_total_order = np.zeros((num_vars, num_repetitions))
+
+ for b in range(num_repetitions):
+
+ computed_indices = SA.run(n_samples=n_samples)
+
+ bootstrap_first_order[:, b] = computed_indices["gen_sobol_i"].ravel()
+ bootstrap_total_order[:, b] = computed_indices["gen_sobol_total_i"].ravel()
+
+ var_bootstrap_gen_S = np.var(bootstrap_first_order, axis=1, ddof=1)
+ var_bootstrap_gen_S_T = np.var(bootstrap_total_order, axis=1, ddof=1)
+
+ print(var_bootstrap_gen_S)
+ print(var_bootstrap_gen_S_T)
+
+ """
+
+ variance_first_order = np.array([0.00011284, 0.00012608])
+
+ variance_total_order = np.array([0.00012448, 0.00011208])
+
+ return variance_first_order, variance_total_order
+
+
+# Unit tests
+###############################################################################
+
+
+def test_pick_and_freeze_estimator(
+ pick_and_freeze_toy_GSI_normal,
+ analytical_toy_GSI_normal,
+ pick_and_freeze_toy_GSI_uniform,
+ analytical_toy_GSI_uniform,
+):
+ """
+ Test the pick and freeze estimator.
+
+ """
+
+ # Prepare
+ N_true = analytical_toy_GSI_normal
+ N_estimate = pick_and_freeze_toy_GSI_normal
+
+ U_true = analytical_toy_GSI_uniform
+ U_estimate = pick_and_freeze_toy_GSI_uniform
+
+ # Act
+ # Idea: Measure accuracy upto 2 decimal places -> rtol=0, atol=1e-2
+ assert np.isclose(N_estimate, N_true, rtol=0, atol=1e-2).all()
+ assert np.isclose(U_estimate, U_true, rtol=0, atol=1e-2).all()
+
+
+def test_bootstrap_variance_computation(
+ model_eval_generalised_sobol_index_variance,
+ bootstrap_generalised_sobol_index_variance,
+):
+
+ """Test the bootstrap variance computation."""
+
+ # Prepare
+ var_first, var_total = model_eval_generalised_sobol_index_variance
+ boot_var_first, boot_var_total = bootstrap_generalised_sobol_index_variance
+
+ # Act
+ assert var_first.shape == boot_var_first.shape
+
+ # Idea: Ensure bootstrap variance and MC variance are of same order -> rtol=0, atol=1e-4
+ assert np.isclose(boot_var_first, var_first, rtol=0, atol=1e-4).all()
+ assert np.isclose(boot_var_total, var_total, rtol=0, atol=1e-4).all()
From f15c7bb4cb6f88b1311698b3f1180806cdb0d67b Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 9 May 2022 03:25:12 +0200
Subject: [PATCH 24/59] Added examples generalised sobol sensitivity
---
.../local_mechanical_oscillator_ODE.py | 60 ++++++++++++++
.../generalised_sobol/local_multioutput.py | 42 ++++++++++
...alised_sobol_mechcanical_oscillator_ODE.py | 68 ++++++++++++++++
.../plot_generalised_sobol_multioutput.py | 78 +++++++++++++++++++
4 files changed, 248 insertions(+)
create mode 100644 docs/code/sensitivity/generalised_sobol/local_mechanical_oscillator_ODE.py
create mode 100644 docs/code/sensitivity/generalised_sobol/local_multioutput.py
create mode 100644 docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechcanical_oscillator_ODE.py
create mode 100644 docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py
diff --git a/docs/code/sensitivity/generalised_sobol/local_mechanical_oscillator_ODE.py b/docs/code/sensitivity/generalised_sobol/local_mechanical_oscillator_ODE.py
new file mode 100644
index 000000000..13b28c9fa
--- /dev/null
+++ b/docs/code/sensitivity/generalised_sobol/local_mechanical_oscillator_ODE.py
@@ -0,0 +1,60 @@
+"""
+
+Auxiliary file
+==============================================
+
+"""
+
+import numpy as np
+from scipy.integrate import solve_ivp
+
+
+def mech_oscillator(input_parameters):
+ """
+ r"""
+
+ .. math::
+
+ m \ddot{x} + c \dot{x} + k x = 0
+
+ with initial conditions :math:`x(0) = \ell`, :math:`\dot{x}(0) = 0`,
+
+ where, for example, :math:`m \sim \mathcal{U}(10, 12)`,
+ :math:`c \sim \mathcal{U}(0.4, 0.8)`,
+ :math:`k \sim \mathcal{U}(70, 90)` and
+ :math:`\ell \sim \mathcal{U}(-1, -0.25)`.
+
+
+ References
+ ----------
+
+ .. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others .
+ Sensitivity analysis for multidimensional and functional outputs.
+ Electronic journal of statistics 2014; 8(1): 575-603.
+
+ """
+
+ # unpack the input parameters
+ m, c, k, l = input_parameters[0]
+
+ # initial conditions
+ x_0 = l
+ v_0 = 0
+
+ # time points
+ t_0 = 0
+ t_f = 40
+ dt = 0.05
+ n_t = int((t_f - t_0) / dt)
+ T = np.linspace(t_0, t_f, n_t)
+
+ def ODE(t, y):
+ """
+ The ODE system.
+ """
+ return np.array([y[1], -(k / m) * y[0] - (c / m) * y[1]])
+
+ # solve the ODE
+ sol = solve_ivp(ODE, [t_0, t_f], [x_0, v_0], method="RK45", t_eval=T)
+
+ return sol.y[0]
diff --git a/docs/code/sensitivity/generalised_sobol/local_multioutput.py b/docs/code/sensitivity/generalised_sobol/local_multioutput.py
new file mode 100644
index 000000000..6974d37c0
--- /dev/null
+++ b/docs/code/sensitivity/generalised_sobol/local_multioutput.py
@@ -0,0 +1,42 @@
+"""
+This is the toy example with multiple outputs from [1]_.
+
+References
+----------
+
+.. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others.
+ Sensitivity analysis for multidimensional and functional outputs.
+ Electronic journal of statistics 2014; 8(1): 575-603.
+
+"""
+
+import numpy as np
+
+
+def evaluate(X):
+
+ """
+
+ * **Input:**
+
+ * **X** (`ndarray`):
+ Samples from the input distribution.
+ Shape: (n_samples, 2)
+
+ * **Output:**
+
+ * **Y** (`ndarray`):
+ Model evaluations.
+ Shape: (2, n_samples)
+
+ """
+
+ n_samples = X.shape[0]
+
+ output = np.zeros((2, n_samples))
+
+ output[0, :] = X[:, 0] + X[:, 1] + X[:, 0] * X[:, 1]
+
+ output[1, :] = 2 * X[:, 0] + X[:, 1] + 3 * X[:, 0] * X[:, 1]
+
+ return output
diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechcanical_oscillator_ODE.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechcanical_oscillator_ODE.py
new file mode 100644
index 000000000..62a19d96d
--- /dev/null
+++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechcanical_oscillator_ODE.py
@@ -0,0 +1,68 @@
+r"""
+
+Mechanical oscillator model (multioutput)
+==============================================
+
+The mechanical oscillator is governed by the following second-order ODE:
+
+.. math::
+ m \ddot{x} + c \dot{x} + k x = 0
+
+.. math::
+ x(0) = \ell, \dot{x}(0) = 0.
+
+The parameters of the oscillator are modeled as follows:
+
+.. math::
+ m \sim \mathcal{U}(10, 12), c \sim \mathcal{U}(0.4, 0.8), k \sim \mathcal{U}(70, 90), \ell \sim \mathcal{U}(-1, -0.25).
+
+"""
+
+# %%
+import numpy as np
+
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Uniform, Normal
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.generalised_sobol import GeneralisedSobol
+
+# %%
+# Create Model object
+model = PythonModel(
+ model_script="local_mechanical_oscillator_ODE.py",
+ model_object_name="mech_oscillator",
+ var_names=[r"$m$", r"$c$", r"$k$", r"$\ell$"],
+ delete_files=True,
+)
+
+runmodel_obj = RunModel(model=model)
+
+# Define distribution object
+M = Uniform(10, (12 - 10))
+C = Uniform(0.4, (0.8 - 0.4))
+K = Uniform(70, (90 - 70))
+L = Uniform(-1, (-0.25 - -1))
+dist_object = JointIndependent([M, C, K, L])
+
+# %%
+SA = GeneralisedSobol(runmodel_obj, dist_object)
+
+computed_indices = SA.run(n_samples=500)
+
+# %% [markdown]
+# Expected generalised Sobol indices:
+#
+# $GS_{m}$ = 0.0826
+#
+# $GS_{c}$ = 0.0020
+#
+# $GS_{k}$ = 0.2068
+#
+# $GS_{\ell}$ = 0.0561
+
+# %%
+computed_indices["gen_sobol_i"]
+
+# %%
+computed_indices["gen_sobol_total_i"]
diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py
new file mode 100644
index 000000000..d89cfec12
--- /dev/null
+++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py
@@ -0,0 +1,78 @@
+r"""
+
+Toy multioutput function
+==============================================
+
+.. math::
+ Y = f (X_{1}, X_{2}) := \left(\begin{array}{c}
+ X_{1}+X_{2}+X_{1} X_{2} \\
+ 2 X_{1}+3 X_{1} X_{2}+X_{2}
+ \end{array}\right)
+
+.. math::
+ \text{case 1: } X_1, X_2 \sim \mathcal{U}(0, 1)
+
+.. math::
+ \text{case 2: } X_1, X_2 \sim \mathcal{N}(0, 1)
+
+"""
+
+# %%
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Uniform, Normal
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.generalised_sobol import GeneralisedSobol
+
+# %%
+# Create Model object
+model = PythonModel(
+ model_script="local_multioutput.py",
+ model_object_name="evaluate",
+ var_names=[r"$X_1$", r"$X_2$"],
+ delete_files=True,
+)
+
+runmodel_obj = RunModel(model=model)
+
+# Define distribution object
+dist_object_1 = JointIndependent([Normal(0, 1)] * 2)
+dist_object_2 = JointIndependent([Uniform(0, 1)] * 2)
+
+# %%
+SA = GeneralisedSobol(runmodel_obj, dist_object_1)
+
+computed_indices = SA.run(
+ n_samples=20_000, confidence_level=0.95, num_bootstrap_samples=500
+)
+
+# %% [markdown]
+# Gaussian case
+#
+# $S_1$ = 0.2941
+#
+# $S_2$ = 0.1176
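+#
+# These values can be verified analytically (a quick check, not part of the
+# original example): for independent standard normal inputs,
+# $V[Y_1] = 3$, $V[Y_2] = 14$, $V[E[Y_1|X_1]] = 1$ and $V[E[Y_2|X_1]] = 4$,
+# so that $GS_1 = (1 + 4)/(3 + 14) = 5/17 \approx 0.2941$ and, similarly,
+# $GS_2 = (1 + 1)/(3 + 14) = 2/17 \approx 0.1176$.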
+
+# %%
+computed_indices["gen_sobol_i"]
+
+# %%
+computed_indices["gen_sobol_total_i"]
+
+# %%
+SA = GeneralisedSobol(runmodel_obj, dist_object_2)
+
+computed_indices = SA.run(n_samples=100_000)
+
+# %% [markdown]
+# Uniform case
+#
+# $S_1$ = 0.6084
+#
+# $S_2$ = 0.3566
+
+# %%
+computed_indices["gen_sobol_i"]
+
+# %%
+computed_indices["gen_sobol_total_i"]
From 21c02896e2ca5947470f95dc160bb2ca82e7ee85 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 9 May 2022 17:29:37 +0200
Subject: [PATCH 25/59] Improved documentation for Chatterjee indices
+ Formatting
+ Detailed descriptions
---
docs/code/sensitivity/chatterjee/README.rst | 16 +++++---
.../chatterjee/plot_chatterjee_exponential.py | 26 +++++++++----
.../chatterjee/plot_chatterjee_ishigami.py | 28 ++++++++++++--
.../chatterjee/plot_chatterjee_sobol_func.py | 38 ++++++++++++++-----
.../sensitivity/sobol/plot_sobol_ishigami.py | 14 ++++++-
docs/source/sensitivity/chatterjee.rst | 2 +-
6 files changed, 97 insertions(+), 27 deletions(-)
diff --git a/docs/code/sensitivity/chatterjee/README.rst b/docs/code/sensitivity/chatterjee/README.rst
index 590eee2a7..540581862 100644
--- a/docs/code/sensitivity/chatterjee/README.rst
+++ b/docs/code/sensitivity/chatterjee/README.rst
@@ -2,14 +2,18 @@ Chatterjee indices
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
These examples serve as a guide for using the Chatterjee sensitivity module. They have been taken from various papers to enable validation of the implementation and have been referenced accordingly.
-1. Ishigami function
+1. **Ishigami function**
-2. Exponential function
+ In addition to the Pick and Freeze scheme, the Sobol indices can be estimated using the rank statistics approach [2]_. We demonstrate this estimation of the Sobol indices using the Ishigami function.
-For the Exponential model, analytical Cramer-von Mises indices are available, since they are equivalent to the Chatterjee indices, they are shown here.
+2. **Exponential function**
-3. Sobol function
+ For the Exponential model, analytical Cramér-von Mises indices are available [1]_ and since they are equivalent to the Chatterjee indices in the sample limit, they are shown here.
-This example was considered in [1] page 18.
+3. **Sobol function**
-.. [1] Fabrice Gamboa, Pierre Gremaud, Thierry Klein, and Agnès Lagnoux. (2020). Global Sensitivity Analysis: a new generation of mighty estimators based on rank statistics.
\ No newline at end of file
+ This example was considered in [2]_ (page 18) to compare the Pick and Freeze scheme with the rank statistics approach for estimating the Sobol indices.
+
+.. [1] Gamboa, F., Klein, T., & Lagnoux, A. (2018). Sensitivity Analysis Based on Cramér-von Mises Distance. SIAM/ASA Journal on Uncertainty Quantification, 6(2), 522-548. doi:10.1137/15M1025621. (`Link `_)
+
+.. [2] Fabrice Gamboa, Pierre Gremaud, Thierry Klein, and Agnès Lagnoux. (2020). Global Sensitivity Analysis: a new generation of mighty estimators based on rank statistics.
diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py
index 2922b97af..81d752653 100644
--- a/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py
+++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py
@@ -3,9 +3,17 @@
Exponential function
==============================================
+The exponential function was used in [1]_ to demonstrate the
+Cramér-von Mises indices. The Chatterjee indices approach the Cramér-von Mises
+indices in the sample limit, which is demonstrated via this example.
+
.. math::
f(x) := \exp(x_1 + 2x_2), \quad x_1, x_2 \sim \mathcal{N}(0, 1)
+.. [1] Gamboa, F., Klein, T., & Lagnoux, A. (2018). Sensitivity Analysis Based on \
+Cramér-von Mises Distance. SIAM/ASA Journal on Uncertainty Quantification, 6(2), \
+522-548. doi:10.1137/15M1025621. (`Link `_)
+
"""
# %%
@@ -15,7 +23,9 @@
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.chatterjee import Chatterjee
-# %%
+# %% [markdown]
+# **Define the model and input distributions**
+
# Create Model object
model = PythonModel(
model_script="local_exponential.py",
@@ -33,22 +43,24 @@
dist_object = JointIndependent([Normal(0, 1)] * 2)
# %% [markdown]
-# Compute Chatterjee indices
+# **Compute Chatterjee indices**
-# %%
+# %% [markdown]
SA = Chatterjee(runmodel_obj, dist_object)
-# Compute Sobol indices using the pick and freeze algorithm
+# Compute Chatterjee indices using rank statistics
computed_indices = SA.run(n_samples=1_000_000)
# %% [markdown]
-# Cramer-von Mises sensitivity analysis
+# **Chatterjee indices**
+#
+# The Chatterjee indices approach the Cramér-von Mises indices in the sample limit.
#
# Expected value of the sensitivity indices:
#
-# $S^1_{CVM} = \frac{6}{\pi} \operatorname{arctan}(2) - 2 \approx 0.1145$
+# :math:`S^1_{CVM} = \frac{6}{\pi} \operatorname{arctan}(2) - 2 \approx 0.1145`
#
-# $S^2_{CVM} = \frac{6}{\pi} \operatorname{arctan}(\sqrt{19}) - 2 \approx 0.5693$
+# :math:`S^2_{CVM} = \frac{6}{\pi} \operatorname{arctan}(\sqrt{19}) - 2 \approx 0.5693`
# %%
computed_indices["chatterjee_i"]
diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py
index 66897d670..d3759fc10 100644
--- a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py
+++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py
@@ -3,6 +3,9 @@
Ishigami function
==============================================
+The Ishigami function is a non-linear, non-monotonic function that is commonly used to
+benchmark uncertainty and sensitivity analysis methods.
+
.. math::
f(x_1, x_2, x_3) = sin(x_1) + a \cdot sin^2(x_2) + b \cdot x_3^4 sin(x_1)
@@ -20,7 +23,9 @@
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.chatterjee import Chatterjee
-# %%
+# %% [markdown]
+# **Define the model and input distributions**
+
# Create Model object
model = PythonModel(
model_script="local_ishigami.py",
@@ -36,9 +41,9 @@
dist_object = JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3)
# %% [markdown]
-# Compute Chatterjee indices
+# **Compute Chatterjee indices**
-# %%
+# %% [markdown]
SA = Chatterjee(runmodel_obj, dist_object)
computed_indices = SA.run(
@@ -48,11 +53,28 @@
confidence_level=0.95,
)
+# %% [markdown]
+# **Chatterjee indices**
+
# %%
computed_indices["chatterjee_i"]
+# %% [markdown]
+# **Confidence intervals for the Chatterjee indices**
+
# %%
computed_indices["CI_chatterjee_i"]
+# %% [markdown]
+# **Estimated Sobol indices**
+#
+# Expected first order Sobol indices:
+#
+# :math:`S_1`: 0.3139
+#
+# :math:`S_2`: 0.4424
+#
+# :math:`S_3`: 0.0
+
# %%
computed_indices["sobol_i"]
diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
index 578131426..0169597e1 100644
--- a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
+++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
@@ -3,6 +3,15 @@
Sobol function
==============================================
+The Sobol function is a non-linear function that is commonly used to benchmark uncertainty
+and sensitivity analysis methods. Unlike the Ishigami function, which has 3 input
+variables, the Sobol function can have any number of input variables.
+
+This function was used in [1]_ to compare the Pick and Freeze approach and the rank
+statistics approach to estimating Sobol indices. The rank statistics approach was
+observed to be more accurate than the Pick and Freeze approach and it also provides
+better estimates when only a small number of model evaluations are available.
+
.. math::
g(x_1, x_2, \ldots, x_D) := \prod_{i=1}^{D} \frac{|4x_i - 2| + a_i}{1 + a_i},
@@ -12,6 +21,8 @@
.. math::
x_i \sim \mathcal{U}(0, 1), \quad a_i \in \mathbb{R}.
+.. [1] Fabrice Gamboa, Pierre Gremaud, Thierry Klein, and Agnès Lagnoux. (2020). Global Sensitivity Analysis: a new generation of mighty estimators based on rank statistics.
+
"""
# %%
@@ -23,7 +34,9 @@
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.chatterjee import Chatterjee
-# %%
+# %% [markdown]
+# **Define the model and input distributions**
+
# Create Model object
num_vars = 6
a_vals = np.array([0.0, 0.5, 3.0, 9.0, 99.0, 99.0])
@@ -42,29 +55,36 @@
dist_object = JointIndependent([Uniform(0, 1)] * num_vars)
# %% [markdown]
-# Compute Chatterjee indices
+# **Compute Chatterjee indices**
-# %%
+# %% [markdown]
SA = Chatterjee(runmodel_obj, dist_object)
# Compute Sobol indices using the pick and freeze algorithm
computed_indices = SA.run(n_samples=500_000, estimate_sobol_indices=True)
+# %% [markdown]
+# **Chatterjee indices**
+
# %%
computed_indices["chatterjee_i"]
# %% [markdown]
-# $S_1$ = 5.86781190e-01
+# **Estimated Sobol indices**
+#
+# Expected first order Sobol indices:
+#
+# :math:`S_1` = 5.86781190e-01
#
-# $S_2$ = 2.60791640e-01
+# :math:`S_2` = 2.60791640e-01
#
-# $S_3$ = 3.66738244e-02
+# :math:`S_3` = 3.66738244e-02
#
-# $S_4$ = 5.86781190e-03
+# :math:`S_4` = 5.86781190e-03
#
-# $S_5$ = 5.86781190e-05
+# :math:`S_5` = 5.86781190e-05
#
-# $S_6$ = 5.86781190e-05
+# :math:`S_6` = 5.86781190e-05
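+#
+# These reference values can be reproduced from the analytical variance
+# decomposition of the Sobol g-function (an illustrative sketch, not part of the
+# original example): :math:`V_i = \frac{1}{3(1+a_i)^2}` and
+# :math:`S_i = V_i \big/ \left( \prod_{j=1}^{D} (1 + V_j) - 1 \right)`.
+
+# %%
+V = 1 / (3 * (1 + a_vals) ** 2)            # partial variances of the g-function
+S_analytical = V / (np.prod(1 + V) - 1)    # analytical first order Sobol indices
+S_analytical
+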
# %%
computed_indices["sobol_i"]
diff --git a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py
index a448a61b4..f067aa4b3 100644
--- a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py
+++ b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py
@@ -3,6 +3,9 @@
Ishigami function
==============================================
+The Ishigami function is a non-linear, non-monotonic function that is commonly used to
+benchmark uncertainty and sensitivity analysis methods.
+
.. math::
f(x_1, x_2, x_3) = sin(x_1) + a \cdot sin^2(x_2) + b \cdot x_3^4 sin(x_1)
@@ -44,6 +47,9 @@
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.sobol import Sobol
+# %% [markdown]
+# **Define the model and input distributions**
+
# %%
# Create Model object
model = PythonModel(
@@ -59,12 +65,17 @@
# Define distribution object
dist_object = JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3)
+# %% [markdown]
+# **Compute Sobol indices**
+
# %%
SA = Sobol(runmodel_obj, dist_object)
computed_indices = SA.run(n_samples=100_000, num_bootstrap_samples=100)
# %% [markdown]
+# **Sobol indices**
+#
# Expected first order Sobol indices:
#
# X1: 0.3139
@@ -72,12 +83,13 @@
# X2: 0.4424
#
# X3: 0.0
-#
# %%
computed_indices["sobol_i"]
# %% [markdown]
+# **Total order Sobol indices**
+#
# Expected total order Sobol indices:
#
# X1: 0.55758886
diff --git a/docs/source/sensitivity/chatterjee.rst b/docs/source/sensitivity/chatterjee.rst
index 88f77889b..e2e8ce006 100644
--- a/docs/source/sensitivity/chatterjee.rst
+++ b/docs/source/sensitivity/chatterjee.rst
@@ -9,7 +9,7 @@ Consider :math:`n` samples of random variables :math:`X` and :math:`Y`, with :ma
\xi_{n}(X, Y):=1-\frac{3 \sum_{i=1}^{n-1}\left|r_{i+1}-r_{i}\right|}{n^{2}-1}
-The Chatterjee index converges for :math:`n \rightarrow \infty` to the Cramér-von Mises index and is faster to estimate than using the Pick and Freeze approach in the Cramer-von Mises index.
+The Chatterjee index converges for :math:`n \rightarrow \infty` to the Cramér-von Mises index and is faster to estimate than using the Pick and Freeze approach in the Cramér-von Mises index.
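+
+A minimal numerical sketch of this rank statistic (illustrative only, with placeholder
+data and assuming no ties among the samples) is:
+
+.. code-block:: python
+
+    import numpy as np
+    from scipy.stats import rankdata
+
+    rng = np.random.default_rng(0)
+    x = rng.random(1_000)                                          # input samples (placeholder)
+    y = np.sin(2 * np.pi * x) + 0.1 * rng.standard_normal(1_000)   # model output (placeholder)
+
+    # Rank the outputs after re-ordering the pairs by increasing x.
+    r = rankdata(y[np.argsort(x)])
+    n = len(x)
+
+    xi_n = 1 - 3 * np.sum(np.abs(np.diff(r))) / (n ** 2 - 1)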
.. [1] Sourav Chatterjee (2021) A New Coefficient of Correlation, Journal of the American Statistical Association, 116:536, 2009-2022, DOI: 10.1080/01621459.2020.1758115 (`Link `_)
From 86dcd1aacc8a40869247f2866cde36814f25cf77 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 9 May 2022 18:05:44 +0200
Subject: [PATCH 26/59] Improved documentation for CramervonMises indices
+ Formatting
+ Detailed descriptions
---
.../sensitivity/cramer_von_mises/README.rst | 10 ++++++
.../cramer_von_mises/plot_cvm_exponential.py | 32 +++++++++++++++----
.../cramer_von_mises/plot_cvm_sobol_func.py | 31 ++++++++++++------
docs/source/sensitivity/cramer_von_mises.rst | 2 +-
4 files changed, 59 insertions(+), 16 deletions(-)
diff --git a/docs/code/sensitivity/cramer_von_mises/README.rst b/docs/code/sensitivity/cramer_von_mises/README.rst
index ea5f804b6..b87758792 100644
--- a/docs/code/sensitivity/cramer_von_mises/README.rst
+++ b/docs/code/sensitivity/cramer_von_mises/README.rst
@@ -1,3 +1,13 @@
Cramér-von Mises Sensitivity indices
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+These examples serve as a guide for using the Cramér-von Mises sensitivity module. They have been taken from various papers to enable validation of the implementation and have been referenced accordingly.
+1. **Exponential function**
+
+ For the Exponential model, analytical Cramér-von Mises indices are available [1]_.
+
+2. **Sobol function**
+
+ The Cramér-von Mises indices are computed using the Pick and Freeze approach [1]_. These model evaluations can be used to estimate the Sobol indices as well. We demonstrate this using the Sobol function.
+
+.. [1] Gamboa, F., Klein, T., & Lagnoux, A. (2018). Sensitivity Analysis Based on Cramér-von Mises Distance. SIAM/ASA Journal on Uncertainty Quantification, 6(2), 522-548. doi:10.1137/15M1025621. (`Link `_)
\ No newline at end of file
diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py
index 244fd7805..e6949a71b 100644
--- a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py
+++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py
@@ -3,9 +3,16 @@
Exponential function
==============================================
+The exponential function was used in [1]_ to demonstrate the
+Cramér-von Mises indices.
+
.. math::
f(x) := \exp(x_1 + 2x_2), \quad x_1, x_2 \sim \mathcal{N}(0, 1)
+.. [1] Gamboa, F., Klein, T., & Lagnoux, A. (2018). Sensitivity Analysis Based on \
+Cramér-von Mises Distance. SIAM/ASA Journal on Uncertainty Quantification, 6(2), \
+522-548. doi:10.1137/15M1025621. (`Link `_)
+
"""
# %%
@@ -15,7 +22,9 @@
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.cramer_von_mises import CramervonMises as cvm
-# %%
+# %% [markdown]
+# **Define the model and input distributions**
+# %%
# Create Model object
model = PythonModel(
model_script="local_exponential.py",
@@ -30,29 +39,40 @@
dist_object = JointIndependent([Normal(0, 1)] * 2)
# %% [markdown]
-# Compute Cramer-von Mises indices
+# **Compute Cramér-von Mises indices**
# %%
-# create cvm object
SA = cvm(runmodel_obj, dist_object)
# Compute Sobol indices using the pick and freeze algorithm
computed_indices = SA.run(n_samples=20_000, estimate_sobol_indices=True)
# %% [markdown]
-# Cramer-von Mises sensitivity analysis
+# **Cramér-von Mises indices**
#
# Expected value of the sensitivity indices:
#
-# $S^1_{CVM} = \frac{6}{\pi} \operatorname{arctan}(2) - 2 \approx 0.1145$
+# :math:`S^1_{CVM} = \frac{6}{\pi} \operatorname{arctan}(2) - 2 \approx 0.1145`
#
-# $S^2_{CVM} = \frac{6}{\pi} \operatorname{arctan}(\sqrt{19}) - 2 \approx 0.5693$
+# :math:`S^2_{CVM} = \frac{6}{\pi} \operatorname{arctan}(\sqrt{19}) - 2 \approx 0.5693`
# %%
computed_indices["CVM_i"]
+# %% [markdown]
+# **Estimated first order Sobol indices**
+#
+# Expected first order Sobol indices:
+#
+# :math:`S_1` = 0.0118
+#
+# :math:`S_2` = 0.3738
+
# %%
computed_indices["sobol_i"]
+# %% [markdown]
+# **Estimated total order Sobol indices**
+
# %%
computed_indices["sobol_total_i"]
diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py
index da17e3e2f..ff86ab30e 100644
--- a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py
+++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py
@@ -3,6 +3,10 @@
Sobol function
==============================================
+The Sobol function is a non-linear function that is commonly used to benchmark uncertainty
+and sensitivity analysis methods. Unlike the Ishigami function, which has 3 input
+variables, the Sobol function can have any number of input variables.
+
.. math::
g(x_1, x_2, \ldots, x_D) := \prod_{i=1}^{D} \frac{|4x_i - 2| + a_i}{1 + a_i},
@@ -23,10 +27,12 @@
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.cramer_von_mises import CramervonMises as cvm
-# %%
+# %% [markdown]
+# **Define the model and input distributions**
+# %%
# Create Model object
num_vars = 6
-a_vals = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
+a_vals = np.array([0.0, 0.5, 3.0, 9.0, 99.0, 99.0])
model = PythonModel(
model_script="local_sobol_func.py",
@@ -41,30 +47,37 @@
# Define distribution object
dist_object = JointIndependent([Uniform(0, 1)] * num_vars)
+# %% [markdown]
+# **Compute Cramér-von Mises indices**
+
# %%
SA = cvm(runmodel_obj, dist_object)
# Compute Sobol indices using the pick and freeze algorithm
computed_indices = SA.run(n_samples=20_000, estimate_sobol_indices=True)
+# %% [markdown]
+# **Cramér-von Mises indices**
+
# %%
computed_indices["CVM_i"]
# %% [markdown]
-# Sobol indices computed analytically
+# **Estimated Sobol indices**
#
-# $S_1$ = 0.46067666
+# Expected first order Sobol indices:
#
-# $S_2$ = 0.20474518
+# :math:`S_1` = 5.86781190e-01
#
-# $S_3$ = 0.11516917
+# :math:`S_2` = 2.60791640e-01
#
-# $S_4$ = 0.07370827
+# :math:`S_3` = 3.66738244e-02
#
-# $S_5$ = 0.0511863
+# :math:`S_4` = 5.86781190e-03
#
-# $S_6$ = 0.03760626
+# :math:`S_5` = 5.86781190e-05
#
+# :math:`S_6` = 5.86781190e-05
# %%
computed_indices["sobol_i"]
diff --git a/docs/source/sensitivity/cramer_von_mises.rst b/docs/source/sensitivity/cramer_von_mises.rst
index ccd412af9..1958c128a 100644
--- a/docs/source/sensitivity/cramer_von_mises.rst
+++ b/docs/source/sensitivity/cramer_von_mises.rst
@@ -1,7 +1,7 @@
Cramér-von Mises indices
----------------------------------------
-A sensitivity index based on the Cramér-von Mises distance. In contrast to variance based Sobol indices it takes into account the whole distribution of the model output and is therefore considered as a moment-free method [1]_. Furthermore the index can be naturally extended to multivariate model outputs (not implemented yet in UQPy).
+A sensitivity index based on the Cramér-von Mises distance. In contrast to the variance based Sobol indices, it takes into account the whole distribution of the model output and is therefore considered a moment-free method [1]_. Furthermore, the index can be naturally extended to multivariate model outputs (not yet implemented in UQpy).
Consider a model :math:`Y=f(X): \mathbb{R}^d \rightarrow \mathbb{R}^k` with :math:`d` inputs :math:`X_{(1)}, X_{(2)}, \ldots, X_{(d)}` and :math:`k` outputs :math:`Y_{(1)}, Y_{(2)}, \ldots, Y_{(k)}`. We define the cumulative distribution function :math:`F(t)` of :math:`Y` as:
From d48552cddd22fc0181baa1135e83fba27c920873 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 9 May 2022 18:47:11 +0200
Subject: [PATCH 27/59] Improved documentation GeneralisedSobol indices
+ Formatting
+ Detailed descriptions
---
.../sensitivity/generalised_sobol/README.rst | 14 ++++---
...alised_sobol_mechanical_oscillator_ODE.py} | 26 ++++++++++---
.../plot_generalised_sobol_multioutput.py | 39 +++++++++++++------
docs/source/sensitivity/generalised_sobol.rst | 17 ++++++--
4 files changed, 70 insertions(+), 26 deletions(-)
rename docs/code/sensitivity/generalised_sobol/{plot_generalised_sobol_mechcanical_oscillator_ODE.py => plot_generalised_sobol_mechanical_oscillator_ODE.py} (71%)
diff --git a/docs/code/sensitivity/generalised_sobol/README.rst b/docs/code/sensitivity/generalised_sobol/README.rst
index 88a5bec5e..78ede7984 100644
--- a/docs/code/sensitivity/generalised_sobol/README.rst
+++ b/docs/code/sensitivity/generalised_sobol/README.rst
@@ -1,12 +1,14 @@
Generalised Sobol Sensitivity indices
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-We demonstrate the computation of GSI for 2 examples with multiple outputs:
+These examples serve as a guide for using the GSI sensitivity module. They have been taken from various papers to enable validation of the implementation and have been referenced accordingly.
-1. Mechanical oscillator (analytical solution): Example from [1] page 2
-2. Mechanical oscillator ODE (numerical solution): Example from [2] page 19
-3. Toy example (analytical solution): Example from [2]
+1. **Mechanical oscillator ODE**
-.. [1] Alexanderian, Alen, Gremaud, Pierre A and Smith, Ralph C. Variance-based sensitivity analysis for time-dependent processes.
+ The GSI sensitivity indices are computed for a mechanical oscillator governed by a second-order differential equation [1]_. The model outputs the displacement of the oscillator for a given time period. Unlike the pointwise-in-time Sobol indices, which provide the sensitivity of the model parameters at each point in time, the GSI indices summarise the sensitivities of the model parameters over the entire time period.
-.. [2] Gamboa F, Janon A, Klein T, Lagnoux A, others. Sensitivity analysis for multidimensional and functional outputs. Electronic journal of statistics 2014; 8(1): 575-603.
\ No newline at end of file
+2. **Toy example**
+
+ The GSI sensitivity indices are computed for a toy model whose analytical solution is given in [1]_.
+
+.. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others. Sensitivity analysis for multidimensional and functional outputs. Electronic journal of statistics 2014; 8(1): 575-603.
\ No newline at end of file
diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechcanical_oscillator_ODE.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py
similarity index 71%
rename from docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechcanical_oscillator_ODE.py
rename to docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py
index 62a19d96d..361bde6aa 100644
--- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechcanical_oscillator_ODE.py
+++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py
@@ -16,6 +16,10 @@
.. math::
m \sim \mathcal{U}(10, 12), c \sim \mathcal{U}(0.4, 0.8), k \sim \mathcal{U}(70, 90), \ell \sim \mathcal{U}(-1, -0.25).
+Unlike the pointwise-in-time Sobol indices, which provide the sensitivity of the model
+parameters at each point in time, the GSI indices summarise the sensitivities of the
+model parameters over the entire time period.
+
"""
# %%
@@ -27,7 +31,9 @@
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.generalised_sobol import GeneralisedSobol
-# %%
+# %% [markdown]
+# **Define the model and input distributions**
+# %%
# Create Model object
model = PythonModel(
model_script="local_mechanical_oscillator_ODE.py",
@@ -45,24 +51,32 @@
L = Uniform(-1, (-0.25 - -1))
dist_object = JointIndependent([M, C, K, L])
-# %%
+# %% [markdown]
+# **Compute generalised Sobol indices**
+
+# %%
SA = GeneralisedSobol(runmodel_obj, dist_object)
computed_indices = SA.run(n_samples=500)
# %% [markdown]
+# **First order Generalised Sobol indices**
+#
# Expected generalised Sobol indices:
#
-# $GS_{m}$ = 0.0826
+# :math:`GS_{m}` = 0.0826
#
-# $GS_{c}$ = 0.0020
+# :math:`GS_{c}` = 0.0020
#
-# $GS_{k}$ = 0.2068
+# :math:`GS_{k}` = 0.2068
#
-# $GS_{\ell}$ = 0.0561
+# :math:`GS_{\ell}` = 0.0561
# %%
computed_indices["gen_sobol_i"]
+# %% [markdown]
+# **Total order Generalised Sobol indices**
+
# %%
computed_indices["gen_sobol_total_i"]
diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py
index d89cfec12..1b673ddcf 100644
--- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py
+++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py
@@ -10,10 +10,10 @@
\end{array}\right)
.. math::
- \text{case 1: } X_1, X_2 \sim \mathcal{U}(0, 1)
+ \text{case 1: } X_1, X_2 \sim \mathcal{N}(0, 1)
.. math::
- \text{case 2: } X_1, X_2 \sim \mathcal{N}(0, 1)
+ \text{case 2: } X_1, X_2 \sim \mathcal{U}(0, 1)
"""
@@ -24,7 +24,9 @@
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.generalised_sobol import GeneralisedSobol
-# %%
+# %% [markdown]
+# **Define the model and input distributions**
+# %%
# Create Model object
model = PythonModel(
model_script="local_multioutput.py",
@@ -37,9 +39,11 @@
# Define distribution object
dist_object_1 = JointIndependent([Normal(0, 1)] * 2)
-dist_object_2 = JointIndependent([Uniform(0, 1)] * 2)
-# %%
+# %% [markdown]
+# **Compute generalised Sobol indices**
+
+# %%
SA = GeneralisedSobol(runmodel_obj, dist_object_1)
computed_indices = SA.run(
@@ -47,11 +51,15 @@
)
# %% [markdown]
+# **First order Generalised Sobol indices**
+#
+# Expected generalised Sobol indices:
+#
# Gaussian case
#
-# $S_1$ = 0.2941
+# :math:`GS_1` = 0.2941
#
-# $S_2$ = 0.1179
+# :math:`GS_2` = 0.1179
# %%
computed_indices["gen_sobol_i"]
@@ -59,17 +67,26 @@
# %%
computed_indices["gen_sobol_total_i"]
-# %%
+# %% [markdown]
+# **Compute generalised Sobol indices**
+
+# %%
+dist_object_2 = JointIndependent([Uniform(0, 1)] * 2)
+
SA = GeneralisedSobol(runmodel_obj, dist_object_2)
computed_indices = SA.run(n_samples=100_000)
# %% [markdown]
-# Gaussian case
+# **First order Generalised Sobol indices**
+#
+# Expected generalised Sobol indices:
+#
+# Uniform case
#
-# $S_1$ = 0.6084
+# :math:`GS_1` = 0.6084
#
-# $S_2$ = 0.3566
+# :math:`GS_2` = 0.3566
# %%
computed_indices["gen_sobol_i"]
diff --git a/docs/source/sensitivity/generalised_sobol.rst b/docs/source/sensitivity/generalised_sobol.rst
index 3515a744b..402b3190e 100644
--- a/docs/source/sensitivity/generalised_sobol.rst
+++ b/docs/source/sensitivity/generalised_sobol.rst
@@ -11,16 +11,27 @@ As the inputs :math:`X_{1}, \ldots, X_{d}` are independent, :math:`f` may be dec
f(X) = c + f_{\mathbf{u}}\left(X_{\mathbf{u}}\right)+f_{\sim \mathbf{u}}\left(X_{\sim \mathbf{u}}\right) + f_{\mathbf{u}, \sim \mathbf{u}}\left(X_{\mathbf{u}}, X_{\sim \mathbf{u}}\right)
where :math:`c \in \mathbb{R}^{k}, f_{\mathbf{u}}: E_{\mathbf{u}} \rightarrow \mathbb{R}^{k}, f_{\sim \mathbf{u}}: E_{\sim \mathbf{u}} \rightarrow \mathbb{R}^{k}` and :math:`f_{\mathbf{u}, \sim \mathbf{u}}: E \rightarrow \mathbb{R}^{k}` are given by
-:math:`c=\mathbb{E}(Y), f_{\mathbf{u}}=\mathbb{E}\left(Y \mid X_{\mathbf{u}}\right)-c, f_{\sim \mathbf{u}}=\mathbb{E}\left(Y \mid X_{\sim \mathbf{u}}\right)-c, f_{u, \sim \mathbf{u}}=Y-f_{\mathbf{u}}-f_{\sim \mathbf{u}}-c`
+
+.. math::
+ c = \mathbb{E}(Y),
+
+.. math::
+ f_{\mathbf{u}}=\mathbb{E}\left(Y \mid X_{\mathbf{u}}\right)-c,
+
+.. math::
+ f_{\sim \mathbf{u}}=\mathbb{E}\left(Y \mid X_{\sim \mathbf{u}}\right)-c,
+
+.. math::
+ f_{u, \sim \mathbf{u}}=Y-f_{\mathbf{u}}-f_{\sim \mathbf{u}}-c.
Thanks to :math:`L^{2}`-orthogonality, computing the covariance matrix of both sides of the above equation leads to
.. math::
\Sigma = C_{\mathbf{u}}+C_{\sim \mathbf{u}}+C_{\mathbf{u}, \sim \mathbf{u}}.
-Here, :math:`\Sigma, C_{\mathbf{u}}, C_{\sim \mathbf{u}}` and :math:`C_{\mathbf{u}, \sim \mathbf{u}}` are denoting respectively the covariance matrices of :math:`Y, f_{\mathbf{u}}\left(X_{\mathbf{u}}\right), f_{\sim \mathbf{u}}\left(X_{\sim \mathbf{u}}\right)` and :math:`f_{\mathbf{u}, \sim \mathbf{u}}\left(X_{\mathbf{u}}, X_{\sim \mathbf{u}}\right)`.
+Here, :math:`\Sigma, C_{\mathbf{u}}, C_{\sim \mathbf{u}}` and :math:`C_{\mathbf{u}, \sim \mathbf{u}}` denote the covariance matrices of :math:`Y, f_{\mathbf{u}}\left(X_{\mathbf{u}}\right), f_{\sim \mathbf{u}}\left(X_{\sim \mathbf{u}}\right)` and :math:`f_{\mathbf{u}, \sim \mathbf{u}}\left(X_{\mathbf{u}}, X_{\sim \mathbf{u}}\right)`, respectively.
-The First order generalised Sobol indices can be computed using the Pick-and-Freeze approach as follows, where :math:`\mathbf{u}` is a variable :math:`i` of the independent random variables.
+The first order generalised Sobol indices can be computed using the Pick-and-Freeze approach as follows, where :math:`\mathbf{u}` is taken to be a single input variable :math:`i`.
.. math::
S_{i, N}=\frac{\operatorname{Tr}\left(C_{i, N}\right)}{\operatorname{Tr}\left(\Sigma_{N}\right)}
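To make the trace-based estimator concrete, here is a minimal NumPy sketch (illustrative only, not the UQpy implementation; `y_a` and `y_c_i` stand for pick-and-freeze model evaluations of shape `(N, k)`):

    import numpy as np

    def generalised_sobol_first_order(y_a, y_c_i):
        # Pooled mean of the two pick-and-freeze designs A and C_i
        mean = 0.5 * (y_a.mean(axis=0) + y_c_i.mean(axis=0))
        n = y_a.shape[0]
        # Diagonals of the empirical covariance matrices C_i and Sigma (only traces are needed)
        diag_c_i = ((y_a - mean) * (y_c_i - mean)).sum(axis=0) / (n - 1)
        diag_sigma = 0.5 * (((y_a - mean) ** 2).sum(axis=0) + ((y_c_i - mean) ** 2).sum(axis=0)) / (n - 1)
        return diag_c_i.sum() / diag_sigma.sum()      # Tr(C_i) / Tr(Sigma)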
From d842ec67457ba1cc49df0eb005af160b7e2809f4 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 9 May 2022 23:14:18 +0200
Subject: [PATCH 28/59] Improved documentation Sobol indices
+ Formatting
+ Detailed descriptions
---
docs/code/sensitivity/sobol/README.rst | 23 ++++--
docs/code/sensitivity/sobol/local_additive.py | 21 +++++
.../sensitivity/sobol/local_exponential.py | 20 -----
.../sobol/plot_mechanical_oscillator_ODE.py | 16 +++-
..._exponential.py => plot_sobol_additive.py} | 36 ++++-----
.../code/sensitivity/sobol/plot_sobol_func.py | 78 +++++++++++--------
.../sensitivity/sobol/plot_sobol_ishigami.py | 24 +++---
docs/source/sensitivity/sobol.rst | 14 ++--
8 files changed, 132 insertions(+), 100 deletions(-)
create mode 100644 docs/code/sensitivity/sobol/local_additive.py
delete mode 100644 docs/code/sensitivity/sobol/local_exponential.py
rename docs/code/sensitivity/sobol/{plot_sobol_exponential.py => plot_sobol_additive.py} (50%)
diff --git a/docs/code/sensitivity/sobol/README.rst b/docs/code/sensitivity/sobol/README.rst
index 1be801c21..c927409f5 100644
--- a/docs/code/sensitivity/sobol/README.rst
+++ b/docs/code/sensitivity/sobol/README.rst
@@ -7,18 +7,29 @@ Single output models
======================
We demonstrate the computation of the Sobol indices for models with a single output using the following examples:
-1. Ishigami function
-2. Exponential function
-3. Sobol function with parameters a := [0., 0.5, 3., 9., 99., 99.] : Example from [2] page 11
+1. **Additive function**
+
+ This is a beginner-friendly example for introducing Sobol indices. The function is a linear combination of two inputs which produces a scalar output.
+
+2. **Ishigami function**
+
+ The Ishigami function is a non-linear, non-monotonic function that is commonly used to benchmark uncertainty and sensitivity analysis methods.
+
+3. **Sobol function**
+
+ The Sobol function is a non-linear function that is commonly used to benchmark uncertainty
+ and sensitivity analysis methods. Unlike the Ishigami function, which has 3 input
+ variables, the Sobol function can have any number of input variables (see [2]_).
Multiple output models
========================
We demonstrate the computation of the Sobol indices for models with multiple outputs using the following example:
-1. Mechanical oscillator ODE (numerical model): Example from [1] page 19
+1. **Mechanical oscillator ODE**
+ The Sobol indices are computed for a mechanical oscillator governed by a second-order differential equation [1]_. The model outputs the displacement of the oscillator for a given time period. Here, the sensitivities of the model parameters are computed at each point in time (see [1]_).
-[1] Gamboa F, Janon A, Klein T, Lagnoux A, others. Sensitivity analysis for multidimensional and functional outputs. Electronic journal of statistics 2014; 8(1): 575-603.
+.. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others. Sensitivity analysis for multidimensional and functional outputs. Electronic journal of statistics 2014; 8(1): 575-603.
-[2] Saltelli, A. (2002). Making best use of model evaluations to compute indices.
+.. [2] Saltelli, A. (2002). Making best use of model evaluations to compute indices.
diff --git a/docs/code/sensitivity/sobol/local_additive.py b/docs/code/sensitivity/sobol/local_additive.py
new file mode 100644
index 000000000..a0893fa11
--- /dev/null
+++ b/docs/code/sensitivity/sobol/local_additive.py
@@ -0,0 +1,21 @@
+"""
+
+Auxiliary file
+==============================================
+
+"""
+
+import numpy as np
+
+
+def evaluate(X, params) -> np.array:
+ r"""A linear function that is used to demonstrate sensitivity indices.
+
+ .. math::
+ f(x) = a \cdot x_1 + b \cdot x_2
+ """
+ a, b = params
+
+ Y = a * X[:, 0] + b * X[:, 1]
+
+ return Y
diff --git a/docs/code/sensitivity/sobol/local_exponential.py b/docs/code/sensitivity/sobol/local_exponential.py
deleted file mode 100644
index 1fd0ef0d9..000000000
--- a/docs/code/sensitivity/sobol/local_exponential.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-
-Auxiliary file
-==============================================
-
-"""
-
-import numpy as np
-
-
-def evaluate(X: np.array) -> np.array:
- r"""A non-linear function that is used to demonstrate sensitivity index.
-
- .. math::
- f(x) = \exp(x_1 + 2*x_2)
- """
-
- Y = np.exp(X[:, 0] + 2 * X[:, 1])
-
- return Y
diff --git a/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py b/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py
index 6e03332d0..05636321f 100644
--- a/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py
+++ b/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py
@@ -16,6 +16,10 @@
.. math::
m \sim \mathcal{U}(10, 12), c \sim \mathcal{U}(0.4, 0.8), k \sim \mathcal{U}(70, 90), \ell \sim \mathcal{U}(-1, -0.25).
+Here, we compute the Sobol indices for each point in time; these are called
+pointwise-in-time Sobol indices. They describe the sensitivity of the model
+parameters at each point in time.
+
"""
# %%
@@ -28,7 +32,9 @@
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.sobol import Sobol
-# %%
+# %% [markdown]
+# **Define the model and input distributions**
+# %%
# Create Model object
model = PythonModel(
model_script="local_mechanical_oscillator_ODE.py",
@@ -46,13 +52,17 @@
L = Uniform(-1, (-0.25 - -1))
dist_object = JointIndependent([M, C, K, L])
-# %%
+# %% [markdown]
+# **Compute Sobol indices**
+
+# %%
SA = Sobol(runmodel_obj, dist_object)
computed_indices = SA.run(n_samples=500)
# %%
-# Plot the Sobol indices
+# **Plot the Sobol indices**
+
t_0 = 0
t_f = 40
dt = 0.05
diff --git a/docs/code/sensitivity/sobol/plot_sobol_exponential.py b/docs/code/sensitivity/sobol/plot_sobol_additive.py
similarity index 50%
rename from docs/code/sensitivity/sobol/plot_sobol_exponential.py
rename to docs/code/sensitivity/sobol/plot_sobol_additive.py
index 81c9b78e9..4645ed749 100644
--- a/docs/code/sensitivity/sobol/plot_sobol_exponential.py
+++ b/docs/code/sensitivity/sobol/plot_sobol_additive.py
@@ -1,10 +1,10 @@
"""
-Exponential function
+Additive function
==============================================
.. math::
- f(x) := \exp(x_1 + 2x_2), \quad x_1, x_2 \sim \mathcal{N}(0, 1)
+ f(x) = a \cdot X_1 + b \cdot X_2, \quad X_1, X_2 \sim \mathcal{N}(0, 1), \quad a,b \in \mathbb{R}
"""
@@ -15,16 +15,21 @@
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.sobol import Sobol
-# %%
+# %% [markdown]
+# **Define the model and input distributions**
+# %%
# Create Model object
+a, b = 1, 2
+
model = PythonModel(
- model_script="local_exponential.py",
+ model_script="local_additive.py",
model_object_name="evaluate",
var_names=[
"X_1",
"X_2",
],
delete_files=True,
+ params=[a, b],
)
runmodel_obj = RunModel(model=model)
@@ -33,28 +38,21 @@
dist_object = JointIndependent([Normal(0, 1)] * 2)
# %% [markdown]
-# Compute Sobol indices
+# **Compute Sobol indices**
-# %%
+# %%
SA = Sobol(runmodel_obj, dist_object)
-# Compute Sobol indices using the pick and freeze algorithm
-computed_indices = SA.run(
- n_samples=100_000, num_bootstrap_samples=1_000, confidence_level=0.95
-)
+computed_indices = SA.run(n_samples=50_000)
# %% [markdown]
-# Expected first order Sobol indices (computed analytically):
+# **First order Sobol indices**
+#
+# Expected first order Sobol indices:
#
-# X1: 0.0118
+# :math:`\mathrm{S}_1 = \frac{a^2 \cdot \mathbb{V}[X_1]}{a^2 \cdot \mathbb{V}[X_1] + b^2 \cdot \mathbb{V}[X_2]} = \frac{1^2 \cdot 1}{1^2 \cdot 1 + 2^2 \cdot 1} = 0.2`
#
-# X2: 0.3738
+# :math:`\mathrm{S}_2 = \frac{b^2 \cdot \mathbb{V}[X_2]}{a^2 \cdot \mathbb{V}[X_1] + b^2 \cdot \mathbb{V}[X_2]} = \frac{2^2 \cdot 1}{1^2 \cdot 1 + 2^2 \cdot 1} = 0.8`
# %%
computed_indices["sobol_i"]
-
-# %% [markdown]
-# Confidence intervals for first order Sobol indices
-
-# %%
-computed_indices["CI_sobol_i"]
diff --git a/docs/code/sensitivity/sobol/plot_sobol_func.py b/docs/code/sensitivity/sobol/plot_sobol_func.py
index 0f7f7ed0d..d2640955b 100644
--- a/docs/code/sensitivity/sobol/plot_sobol_func.py
+++ b/docs/code/sensitivity/sobol/plot_sobol_func.py
@@ -3,6 +3,10 @@
Sobol function
==============================================
+The Sobol function is a non-linear function that is commonly used to benchmark uncertainty
+and sensitivity analysis methods. Unlike the Ishigami function, which has 3 input
+variables, the Sobol function can have any number of input variables.
+
.. math::
g(x_1, x_2, \ldots, x_D) := \prod_{i=1}^{D} \frac{|4x_i - 2| + a_i}{1 + a_i},
@@ -23,7 +27,9 @@
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.sobol import Sobol
-# %%
+# %% [markdown]
+# **Define the model and input distributions**
+# %%
# Create Model object
num_vars = 6
a_vals = np.array([0.0, 0.5, 3.0, 9.0, 99.0, 99.0])
@@ -42,83 +48,89 @@
dist_object = JointIndependent([Uniform(0, 1)] * num_vars)
# %% [markdown]
-# #### Compute Sobol indices
+# **Compute Sobol indices**
-# %%
+# %%
SA = Sobol(runmodel_obj, dist_object)
# Compute Sobol indices using the pick and freeze algorithm
computed_indices = SA.run(n_samples=50_000, estimate_second_order=True)
# %% [markdown]
-# First order Sobol indices
+# **First order Sobol indices**
+#
+# Expected first order Sobol indices:
#
-# $S_1$ = 5.86781190e-01
+# :math:`S_1` = 5.86781190e-01
#
-# $S_2$ = 2.60791640e-01
+# :math:`S_2` = 2.60791640e-01
#
-# $S_3$ = 3.66738244e-02
+# :math:`S_3` = 3.66738244e-02
#
-# $S_4$ = 5.86781190e-03
+# :math:`S_4` = 5.86781190e-03
#
-# $S_5$ = 5.86781190e-05
+# :math:`S_5` = 5.86781190e-05
#
-# $S_6$ = 5.86781190e-05
+# :math:`S_6` = 5.86781190e-05
# %%
computed_indices["sobol_i"]
# %% [markdown]
-# Total order Sobol indices
+# **Total order Sobol indices**
#
-# $S_{T_1}$ = 6.90085892e-01
+# Expected total order Sobol indices:
#
-# $S_{T_2}$ = 3.56173364e-01
+# :math:`S_{T_1}` = 6.90085892e-01
#
-# $S_{T_3}$ = 5.63335422e-02
+# :math:`S_{T_2}` = 3.56173364e-01
#
-# $S_{T_4}$ = 9.17057664e-03
+# :math:`S_{T_3}` = 5.63335422e-02
#
-# $S_{T_5}$ = 9.20083854e-05
+# :math:`S_{T_4}` = 9.17057664e-03
#
-# $S_{T_6}$ = 9.20083854e-05
+# :math:`S_{T_5}` = 9.20083854e-05
+#
+# :math:`S_{T_6}` = 9.20083854e-05
#
# %%
computed_indices["sobol_total_i"]
# %% [markdown]
-# Second-order Sobol indices
+# **Second order Sobol indices**
+#
+# Expected second order Sobol indices:
#
-# $S_{12}$ = 0.0869305
+# :math:`S_{12}` = 0.0869305
#
-# $S_{13}$ = 0.0122246
+# :math:`S_{13}` = 0.0122246
#
-# $S_{14}$ = 0.00195594
+# :math:`S_{14}` = 0.00195594
#
-# $S_{15}$ = 0.00001956
+# :math:`S_{15}` = 0.00001956
#
-# $S_{16}$ = 0.00001956
+# :math:`S_{16}` = 0.00001956
#
-# $S_{23}$ = 0.00543316
+# :math:`S_{23}` = 0.00543316
#
-# $S_{24}$ = 0.00086931
+# :math:`S_{24}` = 0.00086931
#
-# $S_{25}$ = 0.00000869
+# :math:`S_{25}` = 0.00000869
#
-# $S_{26}$ = 0.00000869
+# :math:`S_{26}` = 0.00000869
#
-# $S_{34}$ = 0.00012225
+# :math:`S_{34}` = 0.00012225
#
-# $S_{35}$ = 0.00000122
+# :math:`S_{35}` = 0.00000122
#
-# $S_{36}$ = 0.00000122
+# :math:`S_{36}` = 0.00000122
#
-# $S_{45}$ = 0.00000020
+# :math:`S_{45}` = 0.00000020
#
-# $S_{46}$ = 0.00000020
+# :math:`S_{46}` = 0.00000020
#
-# $S_{56}$ = 2.0e-9
+# :math:`S_{56}` = 2.0e-9
# %%
computed_indices["sobol_ij"]
diff --git a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py
index f067aa4b3..d04649811 100644
--- a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py
+++ b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py
@@ -3,8 +3,8 @@
Ishigami function
==============================================
-The Ishigami function is a non-linear, non-monotonic function that is commonly used in
-uncertainty and sensitivity analysis methods.
+The Ishigami function is a non-linear, non-monotonic function that is commonly used to
+benchmark uncertainty and sensitivity analysis methods.
.. math::
f(x_1, x_2, x_3) = sin(x_1) + a \cdot sin^2(x_2) + b \cdot x_3^4 sin(x_1)
@@ -31,7 +31,7 @@
S_{T_1} = \frac{V_{T1}}{\mathbb{V}[Y]}, \quad S_{T_2} = \frac{V_{T2}}{\mathbb{V}[Y]}, \quad S_{T_3} = \frac{V_{T3}}{\mathbb{V}[Y]}
.. math::
- V_{T1} = 0.5 (1 + \frac{b\pi^4}{5})^2 + \frac{8b^2\pi^8}{225}, \quad V_{T2}= \frac{a^2}{8}, \quad V_{T3} = \frac{8b^2\pi^8}{225}
+ V_{T_1} = 0.5 (1 + \frac{b\pi^4}{5})^2 + \frac{8b^2\pi^8}{225}, \quad V_{T_2}= \frac{a^2}{8}, \quad V_{T_3} = \frac{8b^2\pi^8}{225}
.. math::
\mathbb{V}[Y] = \frac{a^2}{8} + \frac{b\pi^4}{5} + \frac{b^2\pi^8}{18} + \frac{1}{2}
@@ -74,15 +74,15 @@
computed_indices = SA.run(n_samples=100_000, num_bootstrap_samples=100)
# %% [markdown]
-# **Sobol indices**
+# **First order Sobol indices**
#
# Expected first order Sobol indices:
#
-# X1: 0.3139
+# :math:`S_1` = 0.3139
#
-# X2: 0.4424
+# :math:`S_2` = 0.4424
#
-# X3: 0.0
+# :math:`S_3` = 0.0
# %%
computed_indices["sobol_i"]
@@ -92,23 +92,23 @@
#
# Expected total order Sobol indices:
#
-# X1: 0.55758886
+# :math:`S_{T_1}` = 0.55758886
#
-# X2: 0.44241114
+# :math:`S_{T_2}` = 0.44241114
#
-# X3: 0.24368366
+# :math:`S_{T_3}` = 0.24368366
# %%
computed_indices["sobol_total_i"]
# %% [markdown]
-# Confidence intervals for first order Sobol indices
+# **Confidence intervals for first order Sobol indices**
# %%
computed_indices["CI_sobol_i"]
# %% [markdown]
-# Confidence intervals for total order Sobol indices
+# **Confidence intervals for total order Sobol indices**
# %%
computed_indices["CI_sobol_total_i"]
diff --git a/docs/source/sensitivity/sobol.rst b/docs/source/sensitivity/sobol.rst
index fa4d49265..45f20b612 100644
--- a/docs/source/sensitivity/sobol.rst
+++ b/docs/source/sensitivity/sobol.rst
@@ -2,22 +2,22 @@
Sobol indices
----------------------------------------
-Sobol indices are the standard approach to calculate a global variance based sensitivity analysis.
+Sobol indices are the standard approach for performing a global sensitivity analysis.
The indices are based on a variance decomposition of the model output. Using this decomposition allows us to assign the contribution of uncertain inputs to the variance of the model output.
There are three main groups of indices:
-- First order indices (:math:`S_{i}`): Describe the fraction of the output variance due to a single uncertain input parameter. This amount of variance can be reduced if the uncertainty in the corresponding input is eliminated.
+- First order indices (:math:`S_{i}`): Describe the fraction of the output variance due to a single uncertain input parameter :math:`i`. This amount of variance can be reduced if the uncertainty in the corresponding input is eliminated.
- Higher order indices: Describe the fraction of the output variance due to interactions between uncertain input parameters. For example, the second order indices (:math:`S_{ij}`) describe the fraction of the output variance due to interactions between two uncertain input parameters :math:`i` and :math:`j`.
-- Total order indices (:math:`S_{T_{i}}`): Describe the fraction of the output variance due to a single input parameter and all higher order effects the input parameter is involved.
+- Total order indices (:math:`S_{T_{i}}`): Describe the fraction of the output variance due to a single input parameter :math:`i` and all higher order effects of the input parameter.
-If the first order index of an input parameter is equal to the total order index it implies that the parameter is not involved in any interaction effects.
+If the first order index of an input parameter is equal to its total order index, it implies that the parameter does not have any interaction effects.
-The Sobol indices are computed using the Pick-and-Freeze approach for single output and multi-output models. Since there are several variants of the Pick-and-Freeze approach, the schemes implemented to compute Sobol indices are listed below:
+The Sobol indices are typically computed using the Pick-and-Freeze approach for single output and multi-output models. Since there are several variants of the Pick-and-Freeze approach, the schemes implemented to compute Sobol indices are listed below:
-(where, :math:`N` is the number of Monte Carlo samples and :math:`m` being the number of input parameters in the model)
+Here, :math:`N` is the number of Monte Carlo samples and :math:`m` is the number of input parameters in the model.
1. **First order indices** (:math:`S_{i}`)
@@ -29,7 +29,7 @@ The Sobol indices are computed using the Pick-and-Freeze approach for single out
.. math::
y_{A}=f(A), \quad y_{C_{i}}=f(C_{i}), \quad f_{0}^{2}=\left(\frac{1}{2N} \sum_{j=1}^{N} y_{A}^{(j)} + y_{C_{i}}^{(j)} \right)^{2}
-Compared to "Sobol1993", the "Janon2014" estimator makes more efficient use of model evaluations and produces better smaller confidence intervals.
+Compared to "Sobol1993", the "Janon2014" estimator makes more efficient use of model evaluations and produces smaller (better) confidence intervals.
- Sobol1993: Requires :math:`N(m + 1)` model evaluations [1]_.
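For reference, a minimal NumPy sketch of the Janon-type first order estimator described above (illustrative array names, not the UQpy implementation):

    import numpy as np

    def first_order_janon(y_a, y_c_i):
        # y_a = f(A), y_c_i = f(C_i): pick-and-freeze evaluations, each of shape (N,)
        f0 = 0.5 * (y_a.mean() + y_c_i.mean())                  # pooled mean
        numerator = (y_a * y_c_i).mean() - f0**2
        denominator = 0.5 * (y_a**2 + y_c_i**2).mean() - f0**2  # pooled variance estimate
        return numerator / denominator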
From 12a5b8252676f4bd3ec1bd3641a63a4b358a1d58 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 23 May 2022 12:55:17 +0200
Subject: [PATCH 29/59] Added NumpyIntArray to support int arrays as input
---
src/UQpy/utilities/ValidationTypes.py | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/src/UQpy/utilities/ValidationTypes.py b/src/UQpy/utilities/ValidationTypes.py
index d73abf94e..c52c4f095 100644
--- a/src/UQpy/utilities/ValidationTypes.py
+++ b/src/UQpy/utilities/ValidationTypes.py
@@ -14,9 +14,15 @@
np.ndarray,
Is[lambda array: np.issubdtype(array.dtype, float)],
]
+NumpyIntArray = Annotated[
+ np.ndarray,
+ Is[lambda array: np.issubdtype(array.dtype, int)],
+]
Numpy2DFloatArrayOrthonormal = Annotated[
np.ndarray,
- Is[lambda array: array.ndim == 2 and np.issubdtype(array.dtype, float) and
- np.allclose(array.T @ array, np.eye(array.shape[1]))],
+ Is[
+ lambda array: array.ndim == 2
+ and np.issubdtype(array.dtype, float)
+ and np.allclose(array.T @ array, np.eye(array.shape[1]))
+ ],
]
-
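A small standalone illustration of how these `Annotated` aliases behave once a function is decorated with `@beartype` (illustrative only; the alias below mirrors the new `NumpyIntArray`):

    import numpy as np
    from typing import Annotated
    from beartype import beartype
    from beartype.vale import Is

    NumpyIntArray = Annotated[np.ndarray, Is[lambda array: np.issubdtype(array.dtype, int)]]

    @beartype
    def count_elements(x: NumpyIntArray) -> int:
        return x.size

    print(count_elements(np.array([1, 2, 3])))   # 3: integer dtype passes the validator
    try:
        count_elements(np.array([1.0, 2.0]))     # float dtype violates the validator
    except Exception as err:                     # beartype raises a type-check exception here
        print(type(err).__name__)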
From df66e93ce4abf2cd10189f39ac0ed9d554312a11 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 23 May 2022 12:57:27 +0200
Subject: [PATCH 30/59] Added Type Hints to Chatterjee module
---
src/UQpy/sensitivity/chatterjee.py | 47 ++++++++++++++++++++++++------
1 file changed, 38 insertions(+), 9 deletions(-)
diff --git a/src/UQpy/sensitivity/chatterjee.py b/src/UQpy/sensitivity/chatterjee.py
index 9bea38cdb..cdbd41b32 100644
--- a/src/UQpy/sensitivity/chatterjee.py
+++ b/src/UQpy/sensitivity/chatterjee.py
@@ -22,9 +22,19 @@
import numpy as np
import scipy.stats
+from beartype import beartype
+from typing import Union
+from numbers import Integral
from UQpy.sensitivity.baseclass.sensitivity import Sensitivity
from UQpy.sensitivity.sobol import compute_first_order as compute_first_order_sobol
+from UQpy.utilities.ValidationTypes import (
+ RandomStateType,
+ PositiveInteger,
+ PositiveFloat,
+ NumpyFloatArray,
+ NumpyIntArray,
+)
from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter
@@ -80,12 +90,13 @@ def __init__(self, runmodel_object, dist_object, random_state=None, **kwargs):
self.n_samples = None
"Number of samples used to estimate the sensitivity indices, :class:`int`"
+ @beartype
def run(
self,
- n_samples=1_000,
- estimate_sobol_indices=False,
- num_bootstrap_samples=None,
- confidence_level=0.95,
+ n_samples: PositiveInteger = 1_000,
+ estimate_sobol_indices: bool = False,
+ num_bootstrap_samples: PositiveInteger = None,
+ confidence_level: PositiveFloat = 0.95,
):
"""
Compute the sensitivity indices using the Chatterjee method.
@@ -191,7 +202,12 @@ def run(
return computed_indices
@staticmethod
- def compute_chatterjee_indices(X, Y, seed=None):
+ @beartype
+ def compute_chatterjee_indices(
+ X: Union[NumpyFloatArray, NumpyIntArray],
+ Y: Union[NumpyFloatArray, NumpyIntArray],
+ seed: RandomStateType = None,
+ ):
r"""
Compute the Chatterjee sensitivity indices
@@ -259,7 +275,10 @@ def compute_chatterjee_indices(X, Y, seed=None):
return chatterjee_indices
@staticmethod
- def rank_analog_to_pickfreeze(X, j):
+ @beartype
+ def rank_analog_to_pickfreeze(
+ X: Union[NumpyFloatArray, NumpyIntArray], j: Integral
+ ):
r"""
Computing the :math:`N(j)` for each :math:`j \in \{1, \ldots, n\}`
as in eq.(8) in [6]_, where :math:`n` is the size of :math:`X`.
@@ -304,7 +323,8 @@ def rank_analog_to_pickfreeze(X, j):
return np.where(rank_X == 0)[0][0]
@staticmethod
- def rank_analog_to_pickfreeze_vec(X):
+ @beartype
+ def rank_analog_to_pickfreeze_vec(X: Union[NumpyFloatArray, NumpyIntArray]):
r"""
Computing the :math:`N(j)` for each :math:`j \in \{1, \ldots, n\}`
in a vectorized manner, where :math:`n` is the size of :math:`X`.
@@ -372,7 +392,11 @@ def rank_analog_to_pickfreeze_vec(X):
return N_func.astype(int)
@staticmethod
- def compute_Sobol_indices(A_model_evals, C_i_model_evals):
+ @beartype
+ def compute_Sobol_indices(
+ A_model_evals: Union[NumpyFloatArray, NumpyIntArray],
+ C_i_model_evals: Union[NumpyFloatArray, NumpyIntArray],
+ ):
r"""
A method to estimate the first order Sobol indices using
the Chatterjee method.
@@ -410,7 +434,12 @@ def compute_Sobol_indices(A_model_evals, C_i_model_evals):
return first_order_sobol
- def compute_rank_analog_of_f_C_i(self, A_samples, A_model_evals):
+ @beartype
+ def compute_rank_analog_of_f_C_i(
+ self,
+ A_samples: Union[NumpyFloatArray, NumpyIntArray],
+ A_model_evals: Union[NumpyFloatArray, NumpyIntArray],
+ ):
r"""
In the Pick and Freeze method, we use model evaluations
:math:`f_A`, :math:`f_B`, :math:`f_{C_{i}}`
From 358440614de4d3fb4b42de178b8c5965ef89dd0e Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 23 May 2022 12:57:37 +0200
Subject: [PATCH 31/59] Added Type Hints to CVM module
---
src/UQpy/sensitivity/cramer_von_mises.py | 41 +++++++++++++++++-------
1 file changed, 29 insertions(+), 12 deletions(-)
diff --git a/src/UQpy/sensitivity/cramer_von_mises.py b/src/UQpy/sensitivity/cramer_von_mises.py
index 66421b440..b4e2bebd6 100644
--- a/src/UQpy/sensitivity/cramer_von_mises.py
+++ b/src/UQpy/sensitivity/cramer_von_mises.py
@@ -15,14 +15,23 @@
"""
import logging
+from typing import Union
import numpy as np
+from beartype import beartype
from UQpy.sensitivity.baseclass.sensitivity import Sensitivity
from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples
from UQpy.sensitivity.sobol import compute_first_order as compute_first_order_sobol
from UQpy.sensitivity.sobol import compute_total_order as compute_total_order_sobol
from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter
+from UQpy.utilities.ValidationTypes import (
+ PositiveInteger,
+ PositiveFloat,
+ NumpyFloatArray,
+ NumpyIntArray,
+)
+
# TODO: Sampling strategies
@@ -85,13 +94,14 @@ def __init__(
self.num_vars = None
"Number of input random variables, :class:`int`"
+ @beartype
def run(
self,
- n_samples=1_000,
- estimate_sobol_indices=False,
- num_bootstrap_samples=None,
- confidence_level=0.95,
- disable_CVM_indices=False,
+ n_samples: PositiveInteger = 1_000,
+ estimate_sobol_indices: bool = False,
+ num_bootstrap_samples: PositiveInteger = None,
+ confidence_level: PositiveFloat = 0.95,
+ disable_CVM_indices: bool = False,
):
"""
@@ -243,7 +253,8 @@ def run(
return computed_indices
@staticmethod
- def indicator_function(Y, W):
+ @beartype
+ def indicator_function(Y: Union[NumpyFloatArray, NumpyIntArray], w: float):
"""
Vectorized version of the indicator function.
@@ -253,22 +264,28 @@ def indicator_function(Y, W):
**Inputs:**
* **Y** (`ndarray`):
- Vector of values of the random variable.
+ Array of values of the random variable.
Shape: `(N, 1)`
- * **W** (`ndarray`):
- Vector of values of the random variable.
- Shape: `(N, 1)`
+ * **w** (`float`):
+ Value to compare with the array.
**Outputs:**
* **indicator** (`ndarray`):
+ Array of integers with truth values.
Shape: `(N, 1)`
"""
- return (Y <= W.T).astype(int)
+ return (Y <= w).astype(int)
- def pick_and_freeze_estimator(self, A_model_evals, W_model_evals, C_i_model_evals):
+ @beartype
+ def pick_and_freeze_estimator(
+ self,
+ A_model_evals: Union[NumpyFloatArray, NumpyIntArray],
+ W_model_evals: Union[NumpyFloatArray, NumpyIntArray],
+ C_i_model_evals: Union[NumpyFloatArray, NumpyIntArray],
+ ):
"""
Compute the first order Cramér-von Mises indices
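The reworked `indicator_function` now compares an array against a scalar threshold; a tiny standalone sketch of the intended behaviour (not part of the patch):

    import numpy as np

    def indicator_function(y, w):
        # 1 where y_j <= w, else 0 (vectorized empirical CDF evaluation at w)
        return (y <= w).astype(int)

    y = np.array([[0.2], [1.5], [0.7]])
    print(indicator_function(y, 0.7).ravel())    # [1 0 1]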
From 9fcda77409fc41079c95ddf697253c716d420a0c Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 23 May 2022 12:57:56 +0200
Subject: [PATCH 32/59] Added Type Hints to GSI module
---
src/UQpy/sensitivity/generalised_sobol.py | 26 ++++++++++++++++++-----
1 file changed, 21 insertions(+), 5 deletions(-)
diff --git a/src/UQpy/sensitivity/generalised_sobol.py b/src/UQpy/sensitivity/generalised_sobol.py
index e5cf2f654..5311941b6 100644
--- a/src/UQpy/sensitivity/generalised_sobol.py
+++ b/src/UQpy/sensitivity/generalised_sobol.py
@@ -26,9 +26,18 @@
import numpy as np
+from typing import Union
+from beartype import beartype
+
from UQpy.sensitivity.baseclass.sensitivity import Sensitivity
from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples
from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter
+from UQpy.utilities.ValidationTypes import (
+ PositiveFloat,
+ PositiveInteger,
+ NumpyFloatArray,
+ NumpyIntArray,
+)
class GeneralisedSobol(Sensitivity):
@@ -80,11 +89,12 @@ def __init__(
self.num_vars = None
"Number of model input variables, :class:`int`"
+ @beartype
def run(
self,
- n_samples=1_000,
- num_bootstrap_samples=None,
- confidence_level=0.95,
+ n_samples: PositiveInteger = 1_000,
+ num_bootstrap_samples: PositiveInteger = None,
+ confidence_level: PositiveFloat = 0.95,
):
"""
@@ -247,8 +257,11 @@ def run(
return computed_indices
@staticmethod
+ @beartype
def compute_first_order_generalised_sobol_indices(
- A_model_evals, B_model_evals, C_i_model_evals
+ A_model_evals: Union[NumpyFloatArray, NumpyIntArray],
+ B_model_evals: Union[NumpyFloatArray, NumpyIntArray],
+ C_i_model_evals: Union[NumpyFloatArray, NumpyIntArray],
):
"""
@@ -313,8 +326,11 @@ def compute_first_order_generalised_sobol_indices(
return gen_sobol_i
@staticmethod
+ @beartype
def compute_total_order_generalised_sobol_indices(
- A_model_evals, B_model_evals, C_i_model_evals
+ A_model_evals: Union[NumpyFloatArray, NumpyIntArray],
+ B_model_evals: Union[NumpyFloatArray, NumpyIntArray],
+ C_i_model_evals: Union[NumpyFloatArray, NumpyIntArray],
):
"""
From 13d229e128489f550f5baa9ede391dd60256aa83 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 23 May 2022 12:58:29 +0200
Subject: [PATCH 33/59] Added Type Hints to Sobol module
---
src/UQpy/sensitivity/sobol.py | 56 +++++++++++++++++++++--------------
1 file changed, 34 insertions(+), 22 deletions(-)
diff --git a/src/UQpy/sensitivity/sobol.py b/src/UQpy/sensitivity/sobol.py
index 0eb327aaf..3566a6fa4 100644
--- a/src/UQpy/sensitivity/sobol.py
+++ b/src/UQpy/sensitivity/sobol.py
@@ -50,12 +50,20 @@
import math
import logging
import itertools
+from typing import Union
import numpy as np
+from beartype import beartype
from UQpy.sensitivity.baseclass.sensitivity import Sensitivity
from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples
from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter
+from UQpy.utilities.ValidationTypes import (
+ PositiveInteger,
+ PositiveFloat,
+ NumpyFloatArray,
+ NumpyIntArray,
+)
# TODO: Sampling strategies
@@ -128,15 +136,16 @@ def __init__(
self.multioutput = None
"True if the model has multiple outputs, :class:`bool`"
+ @beartype
def run(
self,
- n_samples=1_000,
- num_bootstrap_samples=None,
- confidence_level=0.95,
- estimate_second_order=False,
- first_order_scheme="Janon2014",
- total_order_scheme="Homma1996",
- second_order_scheme="Saltelli2002",
+ n_samples: PositiveInteger = 1_000,
+ num_bootstrap_samples: PositiveInteger = None,
+ confidence_level: PositiveFloat = 0.95,
+ estimate_second_order: bool = False,
+ first_order_scheme: str = "Janon2014",
+ total_order_scheme: str = "Homma1996",
+ second_order_scheme: str = "Saltelli2002",
):
"""
@@ -500,12 +509,13 @@ def run(
"""
+@beartype
def compute_first_order(
- A_model_evals,
- B_model_evals,
- C_i_model_evals,
- D_i_model_evals=None,
- scheme="Janon2014",
+ A_model_evals: Union[NumpyFloatArray, NumpyIntArray],
+ B_model_evals: Union[NumpyFloatArray, NumpyIntArray, None],
+ C_i_model_evals: NumpyFloatArray,
+ D_i_model_evals: Union[NumpyFloatArray, NumpyIntArray, None] = None,
+ scheme: str = "Janon2014",
):
"""
@@ -655,12 +665,13 @@ def compute_first_order(
return first_order_sobol
+@beartype
def compute_total_order(
- A_model_evals,
- B_model_evals,
- C_i_model_evals,
- D_i_model_evals=None,
- scheme="Homma1996",
+ A_model_evals: Union[NumpyFloatArray, NumpyIntArray],
+ B_model_evals: Union[NumpyFloatArray, NumpyIntArray, None],
+ C_i_model_evals: Union[NumpyFloatArray, NumpyIntArray],
+ D_i_model_evals: Union[NumpyFloatArray, NumpyIntArray, None] = None,
+ scheme: str = "Homma1996",
):
"""
@@ -759,13 +770,14 @@ def compute_total_order(
return total_order_sobol
+@beartype
def compute_second_order(
- A_model_evals,
- B_model_evals,
- C_i_model_evals,
- D_i_model_evals,
+ A_model_evals: Union[NumpyFloatArray, NumpyIntArray],
+ B_model_evals: Union[NumpyFloatArray, NumpyIntArray],
+ C_i_model_evals: Union[NumpyFloatArray, NumpyIntArray],
+ D_i_model_evals: Union[NumpyFloatArray, NumpyIntArray],
first_order_sobol=None, # None to make it a keyword argument
- scheme="Saltelli2002",
+ scheme: str = "Saltelli2002",
):
"""
Compute the second order Sobol indices using the Pick-and-Freeze scheme.
From aff7d0965a335b342a367f5eadcd48ca1f5fdcd8 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 23 May 2022 12:59:07 +0200
Subject: [PATCH 34/59] Added Type Hints to baseclass modules
---
src/UQpy/sensitivity/baseclass/pickfreeze.py | 27 +++++++-
src/UQpy/sensitivity/baseclass/sensitivity.py | 68 ++++++++-----------
2 files changed, 55 insertions(+), 40 deletions(-)
diff --git a/src/UQpy/sensitivity/baseclass/pickfreeze.py b/src/UQpy/sensitivity/baseclass/pickfreeze.py
index 4e9e2f57e..9806f76b5 100644
--- a/src/UQpy/sensitivity/baseclass/pickfreeze.py
+++ b/src/UQpy/sensitivity/baseclass/pickfreeze.py
@@ -1,11 +1,36 @@
import copy
+from typing import Union
+from beartype import beartype
-def generate_pick_freeze_samples(dist_obj, n_samples, random_state=None):
+from UQpy.distributions.collection import JointIndependent
+from UQpy.utilities.ValidationTypes import (
+ RandomStateType,
+ PositiveInteger,
+)
+
+
+@beartype
+def generate_pick_freeze_samples(
+ dist_obj: Union[JointIndependent, Union[list, tuple]],
+ n_samples: PositiveInteger,
+ random_state: RandomStateType = None,
+):
"""
Generate samples to be used in the Pick-and-Freeze algorithm.
+ **Inputs**:
+
+ * **dist_obj** (`JointIndependent` or `list` or `tuple`):
+ A distribution object or a list or tuple of distribution objects.
+
+ * **n_samples** (`int`):
+ The number of samples to be generated.
+
+ * **random_state** (`None` or `int` or `numpy.random.RandomState`):
+ A random seed or a `numpy.random.RandomState` object.
+
**Outputs:**
* **A_samples** (`ndarray`):
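With the hinted signature above, a call looks like the following (illustrative only; import paths as they stand at this point in the patch series, before the later module renames):

    from UQpy.distributions import Uniform
    from UQpy.distributions.collection.JointIndependent import JointIndependent
    from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples

    dist = JointIndependent([Uniform(0, 1)] * 3)
    A, B, C_gen, D_gen = generate_pick_freeze_samples(dist, n_samples=5, random_state=42)
    print(A.shape, B.shape)          # (5, 3) (5, 3)
    for C_i in C_gen:                # one C_i per input variable: columns of B, column i from A
        print(C_i.shape)             # (5, 3)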
diff --git a/src/UQpy/sensitivity/baseclass/sensitivity.py b/src/UQpy/sensitivity/baseclass/sensitivity.py
index af2adc594..971fc9e7d 100644
--- a/src/UQpy/sensitivity/baseclass/sensitivity.py
+++ b/src/UQpy/sensitivity/baseclass/sensitivity.py
@@ -13,52 +13,38 @@
import numpy as np
import scipy.stats
+from typing import Union
+from beartype import beartype
+
+from UQpy.distributions import *
+from UQpy.utilities.ValidationTypes import (
+ PositiveFloat,
+ RandomStateType,
+ PositiveInteger,
+ NumpyFloatArray,
+ NumpyIntArray,
+)
from UQpy.run_model import RunModel
-from UQpy.distributions.baseclass import DistributionContinuous1D
from UQpy.distributions.collection import JointIndependent
class Sensitivity:
+ @beartype
def __init__(
- self, runmodel_object, dist_object, random_state=None, **kwargs
+ self,
+ runmodel_object: RunModel,
+ dist_object: Union[JointIndependent, Union[list, tuple]],
+ random_state: RandomStateType = None,
+ **kwargs,
) -> None:
- # Check RunModel object
- if not isinstance(runmodel_object, RunModel):
- raise TypeError("UQpy: runmodel_object must be an object of class RunModel")
-
self.runmodel_object = runmodel_object
-
- # Check distributions
- if isinstance(dist_object, list):
- for i in range(len(dist_object)):
- if not isinstance(dist_object[i], (DistributionContinuous1D, JointIndependent)):
- raise TypeError(
- "UQpy: A ``DistributionContinuous1D`` or ``JointInd`` object "
- "must be provided."
- )
- else:
- if not isinstance(dist_object, (DistributionContinuous1D, JointIndependent)):
- raise TypeError(
- "UQpy: A ``DistributionContinuous1D`` or ``JointInd`` object must be provided."
- )
-
self.dist_object = dist_object
-
- # Check random state
self.random_state = random_state
- if isinstance(self.random_state, int):
- self.random_state = np.random.RandomState(self.random_state)
- elif not (
- self.random_state is None
- or isinstance(self.random_state, np.random.RandomState)
- ):
- raise TypeError(
- "UQpy: random state should be None, an integer or np.random.RandomState object"
- )
# wrapper created for convenience to generate model evaluations
- def _run_model(self, samples):
+ @beartype
+ def _run_model(self, samples: Union[NumpyFloatArray, NumpyIntArray]):
"""Generate model evaluations for a set of samples.
**Inputs**:
@@ -83,7 +69,8 @@ def _run_model(self, samples):
return model_evals
@staticmethod
- def bootstrap_sample_generator_1D(samples):
+ @beartype
+ def bootstrap_sample_generator_1D(samples: Union[NumpyFloatArray, NumpyIntArray]):
"""Generate bootstrap samples.
Generators are used to avoid copying the entire array.
@@ -113,7 +100,8 @@ def bootstrap_sample_generator_1D(samples):
yield samples[_indices]
@staticmethod
- def bootstrap_sample_generator_2D(samples):
+ @beartype
+ def bootstrap_sample_generator_2D(samples: Union[NumpyFloatArray, NumpyIntArray]):
"""Generate bootstrap samples.
Generators are used to avoid copying the entire array.
@@ -156,7 +144,8 @@ def bootstrap_sample_generator_2D(samples):
yield samples[_indices, cols]
@staticmethod
- def bootstrap_sample_generator_3D(samples):
+ @beartype
+ def bootstrap_sample_generator_3D(samples: Union[NumpyFloatArray, NumpyIntArray]):
"""Generate bootstrap samples.
Generators are used to avoid copying the entire array.
@@ -190,13 +179,14 @@ def bootstrap_sample_generator_3D(samples):
yield samples[:, _indices, cols]
+ @beartype
def bootstrapping(
self,
estimator,
estimator_inputs,
- qoi_mean,
- num_bootstrap_samples,
- confidence_level=0.95,
+ qoi_mean: Union[NumpyFloatArray, NumpyIntArray],
+ num_bootstrap_samples: PositiveInteger = None,
+ confidence_level: PositiveFloat = 0.95,
**kwargs,
):
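The bootstrap helpers above are generators so that resamples are produced on demand rather than stored; stripped to its core, the pattern looks like this (illustrative sketch, not the UQpy code):

    import numpy as np

    def bootstrap_sample_generator_1d(samples, rng):
        # Yield resamples (with replacement) of a 1-D array, one at a time
        n = samples.shape[0]
        while True:
            indices = rng.integers(0, n, size=n)
            yield samples[indices]

    rng = np.random.default_rng(0)
    gen = bootstrap_sample_generator_1d(np.arange(10.0), rng)
    print(next(gen))                 # one bootstrap resample of the 10 values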
From 4ceb33c9193e56c3637057fd403022d07202d6a4 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 23 May 2022 13:14:00 +0200
Subject: [PATCH 35/59] Changed Chatterjee module name to CamelCase
---
docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py | 2 +-
docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py | 2 +-
docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py | 2 +-
src/UQpy/sensitivity/{chatterjee.py => Chatterjee.py} | 0
src/UQpy/sensitivity/__init__.py | 2 +-
tests/unit_tests/sensitivity/test_chatterjee.py | 2 +-
6 files changed, 5 insertions(+), 5 deletions(-)
rename src/UQpy/sensitivity/{chatterjee.py => Chatterjee.py} (100%)
diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py
index 81d752653..8fa879847 100644
--- a/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py
+++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py
@@ -21,7 +21,7 @@
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.distributions import Normal
from UQpy.distributions.collection.JointIndependent import JointIndependent
-from UQpy.sensitivity.chatterjee import Chatterjee
+from UQpy.sensitivity.Chatterjee import Chatterjee
# %% [markdown]
# **Define the model and input distributions**
diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py
index d3759fc10..448309e3a 100644
--- a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py
+++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py
@@ -21,7 +21,7 @@
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.distributions import Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
-from UQpy.sensitivity.chatterjee import Chatterjee
+from UQpy.sensitivity.Chatterjee import Chatterjee
# %% [markdown]
# **Define the model and input distributions**
diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
index 0169597e1..439ffaa85 100644
--- a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
+++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
@@ -32,7 +32,7 @@
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.distributions import Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
-from UQpy.sensitivity.chatterjee import Chatterjee
+from UQpy.sensitivity.Chatterjee import Chatterjee
# %% [markdown]
# **Define the model and input distributions**
diff --git a/src/UQpy/sensitivity/chatterjee.py b/src/UQpy/sensitivity/Chatterjee.py
similarity index 100%
rename from src/UQpy/sensitivity/chatterjee.py
rename to src/UQpy/sensitivity/Chatterjee.py
diff --git a/src/UQpy/sensitivity/__init__.py b/src/UQpy/sensitivity/__init__.py
index e25335a34..01d399fda 100644
--- a/src/UQpy/sensitivity/__init__.py
+++ b/src/UQpy/sensitivity/__init__.py
@@ -2,7 +2,7 @@
from UQpy.sensitivity.PceSensitivity import PceSensitivity
from UQpy.sensitivity.sobol import Sobol
from UQpy.sensitivity.cramer_von_mises import CramervonMises
-from UQpy.sensitivity.chatterjee import Chatterjee
+from UQpy.sensitivity.Chatterjee import Chatterjee
from UQpy.sensitivity.generalised_sobol import GeneralisedSobol
from . import MorrisSensitivity
diff --git a/tests/unit_tests/sensitivity/test_chatterjee.py b/tests/unit_tests/sensitivity/test_chatterjee.py
index 8a7c6495f..5912cdb2c 100644
--- a/tests/unit_tests/sensitivity/test_chatterjee.py
+++ b/tests/unit_tests/sensitivity/test_chatterjee.py
@@ -49,7 +49,7 @@
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.distributions import Uniform, Normal
from UQpy.distributions.collection.JointIndependent import JointIndependent
-from UQpy.sensitivity.chatterjee import Chatterjee
+from UQpy.sensitivity.Chatterjee import Chatterjee
# Prepare
From 78ba451e7c00db6b6e1ac6320428230a2f870df1 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 23 May 2022 13:16:52 +0200
Subject: [PATCH 36/59] Changed CVM module name to CamelCase
---
docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py | 2 +-
docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py | 2 +-
src/UQpy/sensitivity/{cramer_von_mises.py => CramervonMises.py} | 0
src/UQpy/sensitivity/__init__.py | 2 +-
tests/unit_tests/sensitivity/test_cramer_von_mises.py | 2 +-
5 files changed, 4 insertions(+), 4 deletions(-)
rename src/UQpy/sensitivity/{cramer_von_mises.py => CramervonMises.py} (100%)
diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py
index e6949a71b..28b390a47 100644
--- a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py
+++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py
@@ -20,7 +20,7 @@
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.distributions import Normal
from UQpy.distributions.collection.JointIndependent import JointIndependent
-from UQpy.sensitivity.cramer_von_mises import CramervonMises as cvm
+from UQpy.sensitivity.CramervonMises import CramervonMises as cvm
# %% [markdown]
# **Define the model and input distributions**
diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py
index ff86ab30e..7500c7259 100644
--- a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py
+++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py
@@ -25,7 +25,7 @@
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.distributions import Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
-from UQpy.sensitivity.cramer_von_mises import CramervonMises as cvm
+from UQpy.sensitivity.CramervonMises import CramervonMises as cvm
# %% [markdown]
# **Define the model and input distributions**
diff --git a/src/UQpy/sensitivity/cramer_von_mises.py b/src/UQpy/sensitivity/CramervonMises.py
similarity index 100%
rename from src/UQpy/sensitivity/cramer_von_mises.py
rename to src/UQpy/sensitivity/CramervonMises.py
diff --git a/src/UQpy/sensitivity/__init__.py b/src/UQpy/sensitivity/__init__.py
index 01d399fda..15bddfb02 100644
--- a/src/UQpy/sensitivity/__init__.py
+++ b/src/UQpy/sensitivity/__init__.py
@@ -1,7 +1,7 @@
from UQpy.sensitivity.MorrisSensitivity import MorrisSensitivity
from UQpy.sensitivity.PceSensitivity import PceSensitivity
from UQpy.sensitivity.sobol import Sobol
-from UQpy.sensitivity.cramer_von_mises import CramervonMises
+from UQpy.sensitivity.CramervonMises import CramervonMises
from UQpy.sensitivity.Chatterjee import Chatterjee
from UQpy.sensitivity.generalised_sobol import GeneralisedSobol
diff --git a/tests/unit_tests/sensitivity/test_cramer_von_mises.py b/tests/unit_tests/sensitivity/test_cramer_von_mises.py
index 46cebb429..c94ddbae0 100644
--- a/tests/unit_tests/sensitivity/test_cramer_von_mises.py
+++ b/tests/unit_tests/sensitivity/test_cramer_von_mises.py
@@ -47,7 +47,7 @@
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.distributions import Normal, Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
-from UQpy.sensitivity.cramer_von_mises import CramervonMises
+from UQpy.sensitivity.CramervonMises import CramervonMises
# Prepare
###############################################################################
From f9eec9ac7896b0e50999b7f11a469ad51fb5b27a Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 23 May 2022 13:19:28 +0200
Subject: [PATCH 37/59] Changed GSI module name to CamelCase
---
.../plot_generalised_sobol_mechanical_oscillator_ODE.py | 2 +-
.../generalised_sobol/plot_generalised_sobol_multioutput.py | 2 +-
.../sensitivity/{generalised_sobol.py => GeneralisedSobol.py} | 0
src/UQpy/sensitivity/__init__.py | 2 +-
tests/unit_tests/sensitivity/test_generalised_sobol.py | 2 +-
5 files changed, 4 insertions(+), 4 deletions(-)
rename src/UQpy/sensitivity/{generalised_sobol.py => GeneralisedSobol.py} (100%)
diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py
index 361bde6aa..716c498f7 100644
--- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py
+++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py
@@ -29,7 +29,7 @@
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.distributions import Uniform, Normal
from UQpy.distributions.collection.JointIndependent import JointIndependent
-from UQpy.sensitivity.generalised_sobol import GeneralisedSobol
+from UQpy.sensitivity.GeneralisedSobol import GeneralisedSobol
# %% [markdown]
# **Define the model and input distributions**
diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py
index 1b673ddcf..af4ca6ff3 100644
--- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py
+++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py
@@ -22,7 +22,7 @@
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.distributions import Uniform, Normal
from UQpy.distributions.collection.JointIndependent import JointIndependent
-from UQpy.sensitivity.generalised_sobol import GeneralisedSobol
+from UQpy.sensitivity.GeneralisedSobol import GeneralisedSobol
# %% [markdown]
# **Define the model and input distributions**
diff --git a/src/UQpy/sensitivity/generalised_sobol.py b/src/UQpy/sensitivity/GeneralisedSobol.py
similarity index 100%
rename from src/UQpy/sensitivity/generalised_sobol.py
rename to src/UQpy/sensitivity/GeneralisedSobol.py
diff --git a/src/UQpy/sensitivity/__init__.py b/src/UQpy/sensitivity/__init__.py
index 15bddfb02..f391bebd2 100644
--- a/src/UQpy/sensitivity/__init__.py
+++ b/src/UQpy/sensitivity/__init__.py
@@ -3,7 +3,7 @@
from UQpy.sensitivity.sobol import Sobol
from UQpy.sensitivity.CramervonMises import CramervonMises
from UQpy.sensitivity.Chatterjee import Chatterjee
-from UQpy.sensitivity.generalised_sobol import GeneralisedSobol
+from UQpy.sensitivity.GeneralisedSobol import GeneralisedSobol
from . import MorrisSensitivity
from . import PceSensitivity
diff --git a/tests/unit_tests/sensitivity/test_generalised_sobol.py b/tests/unit_tests/sensitivity/test_generalised_sobol.py
index c759d85bb..3b5df3167 100644
--- a/tests/unit_tests/sensitivity/test_generalised_sobol.py
+++ b/tests/unit_tests/sensitivity/test_generalised_sobol.py
@@ -50,7 +50,7 @@
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.distributions import Uniform, Normal
from UQpy.distributions.collection.JointIndependent import JointIndependent
-from UQpy.sensitivity.generalised_sobol import GeneralisedSobol
+from UQpy.sensitivity.GeneralisedSobol import GeneralisedSobol
# Prepare
###############################################################################
From bfa2c53e0b7252f84d2841c33652618d79e064a0 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 23 May 2022 13:20:51 +0200
Subject: [PATCH 38/59] Changed Sobol module name to CamelCase
---
docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py | 2 +-
docs/code/sensitivity/sobol/plot_sobol_additive.py | 2 +-
docs/code/sensitivity/sobol/plot_sobol_func.py | 2 +-
docs/code/sensitivity/sobol/plot_sobol_ishigami.py | 2 +-
src/UQpy/sensitivity/Chatterjee.py | 2 +-
src/UQpy/sensitivity/CramervonMises.py | 4 ++--
src/UQpy/sensitivity/{sobol.py => Sobol.py} | 0
src/UQpy/sensitivity/__init__.py | 2 +-
tests/unit_tests/sensitivity/test_baseclass.py | 2 +-
tests/unit_tests/sensitivity/test_sobol.py | 2 +-
10 files changed, 10 insertions(+), 10 deletions(-)
rename src/UQpy/sensitivity/{sobol.py => Sobol.py} (100%)
diff --git a/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py b/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py
index 05636321f..06d1a66b1 100644
--- a/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py
+++ b/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py
@@ -30,7 +30,7 @@
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.distributions import Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
-from UQpy.sensitivity.sobol import Sobol
+from UQpy.sensitivity.Sobol import Sobol
# %% [markdown]
# **Define the model and input distributions**
diff --git a/docs/code/sensitivity/sobol/plot_sobol_additive.py b/docs/code/sensitivity/sobol/plot_sobol_additive.py
index 4645ed749..973e97dd6 100644
--- a/docs/code/sensitivity/sobol/plot_sobol_additive.py
+++ b/docs/code/sensitivity/sobol/plot_sobol_additive.py
@@ -13,7 +13,7 @@
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.distributions import Normal
from UQpy.distributions.collection.JointIndependent import JointIndependent
-from UQpy.sensitivity.sobol import Sobol
+from UQpy.sensitivity.Sobol import Sobol
# %% [markdown]
# **Define the model and input distributions**
diff --git a/docs/code/sensitivity/sobol/plot_sobol_func.py b/docs/code/sensitivity/sobol/plot_sobol_func.py
index d2640955b..5a5cb9389 100644
--- a/docs/code/sensitivity/sobol/plot_sobol_func.py
+++ b/docs/code/sensitivity/sobol/plot_sobol_func.py
@@ -25,7 +25,7 @@
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.distributions import Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
-from UQpy.sensitivity.sobol import Sobol
+from UQpy.sensitivity.Sobol import Sobol
# %% [markdown]
# **Define the model and input distributions**
diff --git a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py
index d04649811..dc118034f 100644
--- a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py
+++ b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py
@@ -45,7 +45,7 @@
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.distributions import Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
-from UQpy.sensitivity.sobol import Sobol
+from UQpy.sensitivity.Sobol import Sobol
# %% [markdown]
# **Define the model and input distributions**
diff --git a/src/UQpy/sensitivity/Chatterjee.py b/src/UQpy/sensitivity/Chatterjee.py
index cdbd41b32..d694d59db 100644
--- a/src/UQpy/sensitivity/Chatterjee.py
+++ b/src/UQpy/sensitivity/Chatterjee.py
@@ -27,7 +27,7 @@
from numbers import Integral
from UQpy.sensitivity.baseclass.sensitivity import Sensitivity
-from UQpy.sensitivity.sobol import compute_first_order as compute_first_order_sobol
+from UQpy.sensitivity.Sobol import compute_first_order as compute_first_order_sobol
from UQpy.utilities.ValidationTypes import (
RandomStateType,
PositiveInteger,
diff --git a/src/UQpy/sensitivity/CramervonMises.py b/src/UQpy/sensitivity/CramervonMises.py
index b4e2bebd6..cb3ef2ed6 100644
--- a/src/UQpy/sensitivity/CramervonMises.py
+++ b/src/UQpy/sensitivity/CramervonMises.py
@@ -22,8 +22,8 @@
from UQpy.sensitivity.baseclass.sensitivity import Sensitivity
from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples
-from UQpy.sensitivity.sobol import compute_first_order as compute_first_order_sobol
-from UQpy.sensitivity.sobol import compute_total_order as compute_total_order_sobol
+from UQpy.sensitivity.Sobol import compute_first_order as compute_first_order_sobol
+from UQpy.sensitivity.Sobol import compute_total_order as compute_total_order_sobol
from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter
from UQpy.utilities.ValidationTypes import (
PositiveInteger,
diff --git a/src/UQpy/sensitivity/sobol.py b/src/UQpy/sensitivity/Sobol.py
similarity index 100%
rename from src/UQpy/sensitivity/sobol.py
rename to src/UQpy/sensitivity/Sobol.py
diff --git a/src/UQpy/sensitivity/__init__.py b/src/UQpy/sensitivity/__init__.py
index f391bebd2..2433a768b 100644
--- a/src/UQpy/sensitivity/__init__.py
+++ b/src/UQpy/sensitivity/__init__.py
@@ -1,6 +1,6 @@
from UQpy.sensitivity.MorrisSensitivity import MorrisSensitivity
from UQpy.sensitivity.PceSensitivity import PceSensitivity
-from UQpy.sensitivity.sobol import Sobol
+from UQpy.sensitivity.Sobol import Sobol
from UQpy.sensitivity.CramervonMises import CramervonMises
from UQpy.sensitivity.Chatterjee import Chatterjee
from UQpy.sensitivity.GeneralisedSobol import GeneralisedSobol
diff --git a/tests/unit_tests/sensitivity/test_baseclass.py b/tests/unit_tests/sensitivity/test_baseclass.py
index 458826d6f..9c1db1810 100644
--- a/tests/unit_tests/sensitivity/test_baseclass.py
+++ b/tests/unit_tests/sensitivity/test_baseclass.py
@@ -17,7 +17,7 @@
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.distributions import Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
-from UQpy.sensitivity.sobol import Sobol
+from UQpy.sensitivity.Sobol import Sobol
from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples
# Prepare
diff --git a/tests/unit_tests/sensitivity/test_sobol.py b/tests/unit_tests/sensitivity/test_sobol.py
index 784eb6143..3c1f11700 100644
--- a/tests/unit_tests/sensitivity/test_sobol.py
+++ b/tests/unit_tests/sensitivity/test_sobol.py
@@ -55,7 +55,7 @@
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.distributions import Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
-from UQpy.sensitivity.sobol import Sobol
+from UQpy.sensitivity.Sobol import Sobol
# Prepare
###############################################################################
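Because src/UQpy/sensitivity/__init__.py re-exports the class, the package-level import is unaffected by this rename; only the fully qualified module path changes. A minimal sketch of the two import styles after this patch:

    # package-level import, re-exported via __init__.py (unchanged)
    from UQpy.sensitivity import Sobol

    # fully qualified import (module file is now CamelCase)
    from UQpy.sensitivity.Sobol import Sobol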
From e0ad8da9db9eb20330f53ffc8b3cb4139ebcdc37 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 23 May 2022 13:22:58 +0200
Subject: [PATCH 39/59] Changed baseclass module names to CamelCase
---
src/UQpy/sensitivity/Chatterjee.py | 2 +-
src/UQpy/sensitivity/CramervonMises.py | 4 ++--
src/UQpy/sensitivity/GeneralisedSobol.py | 4 ++--
src/UQpy/sensitivity/Sobol.py | 4 ++--
.../sensitivity/baseclass/{pickfreeze.py => PickFreeze.py} | 0
.../sensitivity/baseclass/{sensitivity.py => Sensitivity.py} | 0
src/UQpy/sensitivity/baseclass/__init__.py | 4 ++--
tests/unit_tests/sensitivity/test_baseclass.py | 2 +-
8 files changed, 10 insertions(+), 10 deletions(-)
rename src/UQpy/sensitivity/baseclass/{pickfreeze.py => PickFreeze.py} (100%)
rename src/UQpy/sensitivity/baseclass/{sensitivity.py => Sensitivity.py} (100%)
diff --git a/src/UQpy/sensitivity/Chatterjee.py b/src/UQpy/sensitivity/Chatterjee.py
index d694d59db..1eb13ff19 100644
--- a/src/UQpy/sensitivity/Chatterjee.py
+++ b/src/UQpy/sensitivity/Chatterjee.py
@@ -26,7 +26,7 @@
from typing import Union
from numbers import Integral
-from UQpy.sensitivity.baseclass.sensitivity import Sensitivity
+from UQpy.sensitivity.baseclass.Sensitivity import Sensitivity
from UQpy.sensitivity.Sobol import compute_first_order as compute_first_order_sobol
from UQpy.utilities.ValidationTypes import (
RandomStateType,
diff --git a/src/UQpy/sensitivity/CramervonMises.py b/src/UQpy/sensitivity/CramervonMises.py
index cb3ef2ed6..f6507274a 100644
--- a/src/UQpy/sensitivity/CramervonMises.py
+++ b/src/UQpy/sensitivity/CramervonMises.py
@@ -20,8 +20,8 @@
import numpy as np
from beartype import beartype
-from UQpy.sensitivity.baseclass.sensitivity import Sensitivity
-from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples
+from UQpy.sensitivity.baseclass.Sensitivity import Sensitivity
+from UQpy.sensitivity.baseclass.PickFreeze import generate_pick_freeze_samples
from UQpy.sensitivity.Sobol import compute_first_order as compute_first_order_sobol
from UQpy.sensitivity.Sobol import compute_total_order as compute_total_order_sobol
from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter
diff --git a/src/UQpy/sensitivity/GeneralisedSobol.py b/src/UQpy/sensitivity/GeneralisedSobol.py
index 5311941b6..1b8764ca3 100644
--- a/src/UQpy/sensitivity/GeneralisedSobol.py
+++ b/src/UQpy/sensitivity/GeneralisedSobol.py
@@ -29,8 +29,8 @@
from typing import Union
from beartype import beartype
-from UQpy.sensitivity.baseclass.sensitivity import Sensitivity
-from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples
+from UQpy.sensitivity.baseclass.Sensitivity import Sensitivity
+from UQpy.sensitivity.baseclass.PickFreeze import generate_pick_freeze_samples
from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter
from UQpy.utilities.ValidationTypes import (
PositiveFloat,
diff --git a/src/UQpy/sensitivity/Sobol.py b/src/UQpy/sensitivity/Sobol.py
index 3566a6fa4..99daec702 100644
--- a/src/UQpy/sensitivity/Sobol.py
+++ b/src/UQpy/sensitivity/Sobol.py
@@ -55,8 +55,8 @@
import numpy as np
from beartype import beartype
-from UQpy.sensitivity.baseclass.sensitivity import Sensitivity
-from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples
+from UQpy.sensitivity.baseclass.Sensitivity import Sensitivity
+from UQpy.sensitivity.baseclass.PickFreeze import generate_pick_freeze_samples
from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter
from UQpy.utilities.ValidationTypes import (
PositiveInteger,
diff --git a/src/UQpy/sensitivity/baseclass/pickfreeze.py b/src/UQpy/sensitivity/baseclass/PickFreeze.py
similarity index 100%
rename from src/UQpy/sensitivity/baseclass/pickfreeze.py
rename to src/UQpy/sensitivity/baseclass/PickFreeze.py
diff --git a/src/UQpy/sensitivity/baseclass/sensitivity.py b/src/UQpy/sensitivity/baseclass/Sensitivity.py
similarity index 100%
rename from src/UQpy/sensitivity/baseclass/sensitivity.py
rename to src/UQpy/sensitivity/baseclass/Sensitivity.py
diff --git a/src/UQpy/sensitivity/baseclass/__init__.py b/src/UQpy/sensitivity/baseclass/__init__.py
index 7e11a2b63..99b9c2d0a 100644
--- a/src/UQpy/sensitivity/baseclass/__init__.py
+++ b/src/UQpy/sensitivity/baseclass/__init__.py
@@ -1,2 +1,2 @@
-from UQpy.sensitivity.baseclass.sensitivity import *
-from UQpy.sensitivity.baseclass.pickfreeze import *
+from UQpy.sensitivity.baseclass.Sensitivity import *
+from UQpy.sensitivity.baseclass.PickFreeze import *
diff --git a/tests/unit_tests/sensitivity/test_baseclass.py b/tests/unit_tests/sensitivity/test_baseclass.py
index 9c1db1810..724abb298 100644
--- a/tests/unit_tests/sensitivity/test_baseclass.py
+++ b/tests/unit_tests/sensitivity/test_baseclass.py
@@ -18,7 +18,7 @@
from UQpy.distributions import Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.Sobol import Sobol
-from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples
+from UQpy.sensitivity.baseclass.PickFreeze import generate_pick_freeze_samples
# Prepare
###############################################################################
From d78b28ba8da7405d111d2715a459c694aa023697 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 23 May 2022 14:55:26 +0200
Subject: [PATCH 40/59] Changed variable name: CI -> confidence interval
---
src/UQpy/sensitivity/Chatterjee.py | 10 ++++---
src/UQpy/sensitivity/CramervonMises.py | 10 ++++---
src/UQpy/sensitivity/GeneralisedSobol.py | 16 ++++++----
src/UQpy/sensitivity/Sobol.py | 30 +++++++++++--------
.../sensitivity/test_cramer_von_mises.py | 2 +-
.../sensitivity/test_generalised_sobol.py | 6 ++--
tests/unit_tests/sensitivity/test_sobol.py | 10 ++++---
7 files changed, 51 insertions(+), 33 deletions(-)
diff --git a/src/UQpy/sensitivity/Chatterjee.py b/src/UQpy/sensitivity/Chatterjee.py
index 1eb13ff19..a171e6247 100644
--- a/src/UQpy/sensitivity/Chatterjee.py
+++ b/src/UQpy/sensitivity/Chatterjee.py
@@ -81,7 +81,7 @@ def __init__(self, runmodel_object, dist_object, random_state=None, **kwargs):
self.sobol_i = None
"Sobol indices computed using the rank statistics, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`"
- self.CI_chatterjee_i = None
+ self.confidence_interval_chatterjee_i = None
"Confidence intervals for the Chatterjee sensitivity indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 2)`"
self.num_vars = None
@@ -115,7 +115,7 @@ def run(
:return: A :class:`dict` with the following keys: \
:code:`'chatterjee_i'` of shape :code:`(num_vars, 1)`, \
- :code:`'CI_chatterjee_i'` of shape :code:`(num_vars, 2)`, \
+ :code:`'confidence_interval_chatterjee_i'` of shape :code:`(num_vars, 2)`, \
:code:`'sobol_i'` of shape :code:`(num_vars, 1)`.
"""
@@ -185,7 +185,7 @@ def run(
estimator_inputs = [A_samples, A_model_evals]
- self.CI_chatterjee_i = self.bootstrapping(
+ self.confidence_interval_chatterjee_i = self.bootstrapping(
self.compute_chatterjee_indices,
estimator_inputs,
computed_indices["chatterjee_i"],
@@ -197,7 +197,9 @@ def run(
"UQpy: Confidence intervals for Chatterjee indices computed successfully.\n"
)
- computed_indices["CI_chatterjee_i"] = self.CI_chatterjee_i
+ computed_indices[
+ "confidence_interval_chatterjee_i"
+ ] = self.confidence_interval_chatterjee_i
return computed_indices
diff --git a/src/UQpy/sensitivity/CramervonMises.py b/src/UQpy/sensitivity/CramervonMises.py
index f6507274a..745557cd9 100644
--- a/src/UQpy/sensitivity/CramervonMises.py
+++ b/src/UQpy/sensitivity/CramervonMises.py
@@ -79,7 +79,7 @@ def __init__(
self.CVM_i = None
"First order Cramér-von Mises indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`"
- self.CI_CVM_i = None
+ self.confidence_interval_CVM_i = None
"Confidence intervals of the first order Cramér-von Mises indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 2)`"
self.sobol_i = None
@@ -124,7 +124,7 @@ def run(
:return: A :class:`dict` with the following keys: \
:code:`CVM_i` of shape :code:`(num_vars, 1)`, \
- :code:`CI_CVM_i` of shape :code:`(num_vars, 2)`, \
+ :code:`confidence_interval_CVM_i` of shape :code:`(num_vars, 2)`, \
:code:`sobol_i` of shape :code:`(num_vars, 1)`, \
:code:`sobol_total_i` of shape :code:`(num_vars, 1)`.
@@ -205,7 +205,7 @@ def run(
C_i_model_evals,
]
- self.CI_CVM_i = self.bootstrapping(
+ self.confidence_interval_CVM_i = self.bootstrapping(
self.pick_and_freeze_estimator,
estimator_inputs,
computed_indices["CVM_i"],
@@ -218,7 +218,9 @@ def run(
)
# Store the indices in the dictionary
- computed_indices["CI_CVM_i"] = self.CI_CVM_i
+ computed_indices[
+ "confidence_interval_CVM_i"
+ ] = self.confidence_interval_CVM_i
################## COMPUTE SOBOL INDICES ##################
diff --git a/src/UQpy/sensitivity/GeneralisedSobol.py b/src/UQpy/sensitivity/GeneralisedSobol.py
index 1b8764ca3..2a976e004 100644
--- a/src/UQpy/sensitivity/GeneralisedSobol.py
+++ b/src/UQpy/sensitivity/GeneralisedSobol.py
@@ -113,8 +113,8 @@ def run(
:return: A :class:`dict` with the following keys: \
:code:`gen_sobol_i` of shape :code:`(num_vars, 1)`, \
:code:`gen_sobol_total_i` of shape :code:`(num_vars, 1)`, \
- :code:`CI_gen_sobol_i` of shape :code:`(num_vars, 2)`, \
- :code:`CI_gen_sobol_total_i` of shape :code:`(num_vars, 2)`.
+ :code:`confidence_interval_gen_sobol_i` of shape :code:`(num_vars, 2)`, \
+ :code:`confidence_interval_gen_sobol_total_i` of shape :code:`(num_vars, 2)`.
"""
@@ -225,7 +225,7 @@ def run(
]
# First order generalised Sobol indices
- self.CI_gen_sobol_i = self.bootstrapping(
+ self.confidence_interval_gen_sobol_i = self.bootstrapping(
self.compute_first_order_generalised_sobol_indices,
estimator_inputs,
computed_indices["gen_sobol_i"],
@@ -238,7 +238,7 @@ def run(
)
# Total order generalised Sobol indices
- self.CI_gen_sobol_total_i = self.bootstrapping(
+ self.confidence_interval_gen_sobol_total_i = self.bootstrapping(
self.compute_total_order_generalised_sobol_indices,
estimator_inputs,
computed_indices["gen_sobol_total_i"],
@@ -251,8 +251,12 @@ def run(
)
# Store the indices in the dictionary
- computed_indices["CI_gen_sobol_i"] = self.CI_gen_sobol_i
- computed_indices["CI_gen_sobol_total_i"] = self.CI_gen_sobol_total_i
+ computed_indices[
+ "confidence_interval_gen_sobol_i"
+ ] = self.confidence_interval_gen_sobol_i
+ computed_indices[
+ "confidence_interval_gen_sobol_total_i"
+ ] = self.confidence_interval_gen_sobol_total_i
return computed_indices
diff --git a/src/UQpy/sensitivity/Sobol.py b/src/UQpy/sensitivity/Sobol.py
index 99daec702..84e9b3510 100644
--- a/src/UQpy/sensitivity/Sobol.py
+++ b/src/UQpy/sensitivity/Sobol.py
@@ -118,13 +118,13 @@ def __init__(
self.sobol_ij = None
"Second order Sobol indices, :class:`numpy.ndarray` of shape `(num_second_order_terms, n_outputs)`"
- self.CI_sobol_i = None
+ self.confidence_interval_sobol_i = None
"Confidence intervals for the first order Sobol indices, :class:`numpy.ndarray` of shape `(num_vars, 2)`"
- self.CI_sobol_total_i = None
+ self.confidence_interval_sobol_total_i = None
"Confidence intervals for the total order Sobol indices, :class:`numpy.ndarray` of shape `(num_vars, 2)`"
- self.CI_sobol_ij = None
+ self.confidence_interval_sobol_ij = None
"Confidence intervals for the second order Sobol indices, :class:`numpy.ndarray` of shape `(num_second_order_terms, 2)`"
self.n_samples = None
@@ -176,11 +176,11 @@ def run(
:code:`sobol_i` of shape :code:`(num_vars, 1)`, \
:code:`sobol_total_i` of shape :code:`(num_vars, 1)`, \
:code:`sobol_ij` of shape :code:`(num_second_order_terms, 1)`, \
- :code:`CI_sobol_i` of shape :code:`(num_vars, 2)`, \
+ :code:`confidence_interval_sobol_i` of shape :code:`(num_vars, 2)`, \
if multioutput: Shape: `(n_outputs, num_vars, 2)`, \
- :code:`CI_sobol_total_i` of shape :code:`(num_vars, 2)`, \
+ :code:`confidence_interval_sobol_total_i` of shape :code:`(num_vars, 2)`, \
if multioutput: Shape: `(n_outputs, num_vars, 2)`, \
- :code:`CI_sobol_ij` of shape :code:`(num_second_order_terms, 2)`
+ :code:`confidence_interval_sobol_ij` of shape :code:`(num_second_order_terms, 2)`
if multioutput: Shape: `(n_outputs, num_second_order_terms, 2)`, \
"""
@@ -321,7 +321,7 @@ def run(
]
# First order Sobol indices
- self.CI_sobol_i = self.bootstrapping(
+ self.confidence_interval_sobol_i = self.bootstrapping(
compute_first_order,
estimator_inputs,
computed_indices["sobol_i"],
@@ -334,10 +334,12 @@ def run(
"UQpy: Confidence intervals for First order Sobol indices computed successfully."
)
- computed_indices["CI_sobol_i"] = self.CI_sobol_i
+ computed_indices[
+ "confidence_interval_sobol_i"
+ ] = self.confidence_interval_sobol_i
# Total order Sobol indices
- self.CI_sobol_total_i = self.bootstrapping(
+ self.confidence_interval_sobol_total_i = self.bootstrapping(
compute_total_order,
estimator_inputs,
computed_indices["sobol_total_i"],
@@ -350,11 +352,13 @@ def run(
"UQpy: Confidence intervals for Total order Sobol indices computed successfully."
)
- computed_indices["CI_sobol_total_i"] = self.CI_sobol_total_i
+ computed_indices[
+ "confidence_interval_sobol_total_i"
+ ] = self.confidence_interval_sobol_total_i
# Second order Sobol indices
if estimate_second_order:
- self.CI_sobol_ij = self.bootstrapping(
+ self.confidence_interval_sobol_ij = self.bootstrapping(
compute_second_order,
estimator_inputs,
computed_indices["sobol_ij"],
@@ -368,7 +372,9 @@ def run(
"UQpy: Confidence intervals for Second order Sobol indices computed successfully."
)
- computed_indices["CI_sobol_ij"] = self.CI_sobol_ij
+ computed_indices[
+ "confidence_interval_sobol_ij"
+ ] = self.confidence_interval_sobol_ij
return computed_indices
diff --git a/tests/unit_tests/sensitivity/test_cramer_von_mises.py b/tests/unit_tests/sensitivity/test_cramer_von_mises.py
index c94ddbae0..ed9a55b24 100644
--- a/tests/unit_tests/sensitivity/test_cramer_von_mises.py
+++ b/tests/unit_tests/sensitivity/test_cramer_von_mises.py
@@ -152,7 +152,7 @@ def bootstrap_CVM_index_variance(CVM_object, NUM_SAMPLES):
)
First_order = computed_indices["CVM_i"].ravel()
- upper_bound_first_order = computed_indices["CI_CVM_i"][:, 1]
+ upper_bound_first_order = computed_indices["confidence_interval_CVM_i"][:, 1]
#### Compute variance ####
std_bootstrap_first_order = (upper_bound_first_order - First_order) / delta
diff --git a/tests/unit_tests/sensitivity/test_generalised_sobol.py b/tests/unit_tests/sensitivity/test_generalised_sobol.py
index 3b5df3167..0b1f0919c 100644
--- a/tests/unit_tests/sensitivity/test_generalised_sobol.py
+++ b/tests/unit_tests/sensitivity/test_generalised_sobol.py
@@ -209,8 +209,10 @@ def bootstrap_generalised_sobol_index_variance(
gen_sobol_i = computed_indices["gen_sobol_i"].ravel()
gen_sobol_total_i = computed_indices["gen_sobol_total_i"].ravel()
- upper_bound_first_order = computed_indices["CI_gen_sobol_i"][:, 1]
- upper_bound_total_order = computed_indices["CI_gen_sobol_total_i"][:, 1]
+ upper_bound_first_order = computed_indices["confidence_interval_gen_sobol_i"][:, 1]
+ upper_bound_total_order = computed_indices["confidence_interval_gen_sobol_total_i"][
+ :, 1
+ ]
std_bootstrap_first_order = (upper_bound_first_order - gen_sobol_i) / delta
std_bootstrap_total_order = (upper_bound_total_order - gen_sobol_total_i) / delta
diff --git a/tests/unit_tests/sensitivity/test_sobol.py b/tests/unit_tests/sensitivity/test_sobol.py
index 3c1f11700..ff26801da 100644
--- a/tests/unit_tests/sensitivity/test_sobol.py
+++ b/tests/unit_tests/sensitivity/test_sobol.py
@@ -183,12 +183,14 @@ def bootstrap_sobol_index_variance(sobol_object, NUM_SAMPLES):
First_order = computed_indices["sobol_i"].ravel()
Total_order = computed_indices["sobol_total_i"].ravel()
- CI_first_order = computed_indices["CI_sobol_i"]
- CI_total_order = computed_indices["CI_sobol_total_i"]
+ confidence_interval_first_order = computed_indices["confidence_interval_sobol_i"]
+ confidence_interval_total_order = computed_indices[
+ "confidence_interval_sobol_total_i"
+ ]
#### Compute variance ####
- upper_bound_first_order = CI_first_order[:, 1]
- upper_bound_total_order = CI_total_order[:, 1]
+ upper_bound_first_order = confidence_interval_first_order[:, 1]
+ upper_bound_total_order = confidence_interval_total_order[:, 1]
std_bootstrap_first_order = (upper_bound_first_order - First_order) / delta
std_bootstrap_total_order = (upper_bound_total_order - Total_order) / delta
From caffb2d07770a4c295e94b525bcb27a956bb3f32 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 23 May 2022 15:17:33 +0200
Subject: [PATCH 41/59] Changed variable name: CI -> confidence interval
---
.../code/sensitivity/chatterjee/plot_chatterjee_ishigami.py | 2 +-
docs/code/sensitivity/sobol/plot_sobol_ishigami.py | 4 ++--
docs/source/sensitivity/cramer_von_mises.rst | 2 +-
docs/source/sensitivity/sobol.rst | 6 +++---
4 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py
index 448309e3a..21803cc16 100644
--- a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py
+++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py
@@ -63,7 +63,7 @@
# **Confidence intervals for the Chatterjee indices**
# %%
-computed_indices["CI_chatterjee_i"]
+computed_indices["confidence_interval_chatterjee_i"]
# %% [markdown]
# **Estimated Sobol indices**
diff --git a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py
index dc118034f..dc1ce0c62 100644
--- a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py
+++ b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py
@@ -105,10 +105,10 @@
# **Confidence intervals for first order Sobol indices**
# %%
-computed_indices["CI_sobol_i"]
+computed_indices["confidence_interval_sobol_i"]
# %% [markdown]
# **Confidence intervals for total order Sobol indices**
# %%
-computed_indices["CI_sobol_total_i"]
+computed_indices["confidence_interval_sobol_total_i"]
diff --git a/docs/source/sensitivity/cramer_von_mises.rst b/docs/source/sensitivity/cramer_von_mises.rst
index 1958c128a..8ee05455a 100644
--- a/docs/source/sensitivity/cramer_von_mises.rst
+++ b/docs/source/sensitivity/cramer_von_mises.rst
@@ -50,7 +50,7 @@ Methods
Attributes
""""""""""
.. autoattribute:: UQpy.sensitivity.CramervonMises.CVM_i
-.. autoattribute:: UQpy.sensitivity.CramervonMises.CI_CVM_i
+.. autoattribute:: UQpy.sensitivity.CramervonMises.confidence_interval_CVM_i
.. autoattribute:: UQpy.sensitivity.CramervonMises.sobol_i
.. autoattribute:: UQpy.sensitivity.CramervonMises.sobol_total_i
.. autoattribute:: UQpy.sensitivity.CramervonMises.n_samples
diff --git a/docs/source/sensitivity/sobol.rst b/docs/source/sensitivity/sobol.rst
index 45f20b612..fee11e54e 100644
--- a/docs/source/sensitivity/sobol.rst
+++ b/docs/source/sensitivity/sobol.rst
@@ -80,9 +80,9 @@ Attributes
""""""""""
.. autoattribute:: UQpy.sensitivity.Sobol.sobol_i
.. autoattribute:: UQpy.sensitivity.Sobol.sobol_total_i
-.. autoattribute:: UQpy.sensitivity.Sobol.CI_sobol_i
-.. autoattribute:: UQpy.sensitivity.Sobol.CI_sobol_total_i
-.. autoattribute:: UQpy.sensitivity.Sobol.CI_sobol_ij
+.. autoattribute:: UQpy.sensitivity.Sobol.confidence_interval_sobol_i
+.. autoattribute:: UQpy.sensitivity.Sobol.confidence_interval_sobol_total_i
+.. autoattribute:: UQpy.sensitivity.Sobol.confidence_interval_sobol_ij
.. autoattribute:: UQpy.sensitivity.Sobol.n_samples
.. autoattribute:: UQpy.sensitivity.Sobol.num_vars
.. autoattribute:: UQpy.sensitivity.Sobol.multioutput
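The unit tests above recover the bootstrap spread from the upper bound of these intervals. A minimal, self-contained sketch of that access pattern with the renamed dictionary keys (the numbers below are made up; the shapes follow the Sobol.py docstrings):

    import numpy as np

    # Mock of the dictionary returned by Sobol.run() after this rename
    # (values are illustrative only; shapes follow the Sobol.py docstrings).
    computed_indices = {
        "sobol_i": np.array([[0.31], [0.44], [0.02]]),
        "confidence_interval_sobol_i": np.array(
            [[0.28, 0.34], [0.40, 0.48], [-0.01, 0.05]]
        ),
    }

    first_order = computed_indices["sobol_i"].ravel()
    upper_bound = computed_indices["confidence_interval_sobol_i"][:, 1]  # was "CI_sobol_i"
    half_width = upper_bound - first_order  # used in the tests to back out the bootstrap std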
From f3b59350063dd7a2c9809e265efddc4108bc76bb Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 23 May 2022 15:18:47 +0200
Subject: [PATCH 42/59] Added references to bibliography for Chatterjee
---
docs/code/sensitivity/chatterjee/README.rst | 10 +--
docs/source/bibliography.bib | 73 ++++++++++++++++-----
docs/source/sensitivity/chatterjee.rst | 7 +-
src/UQpy/sensitivity/Chatterjee.py | 2 +-
4 files changed, 64 insertions(+), 28 deletions(-)
diff --git a/docs/code/sensitivity/chatterjee/README.rst b/docs/code/sensitivity/chatterjee/README.rst
index 540581862..5e48ef1a9 100644
--- a/docs/code/sensitivity/chatterjee/README.rst
+++ b/docs/code/sensitivity/chatterjee/README.rst
@@ -4,16 +4,12 @@ These examples serve as a guide for using the Chatterjee sensitivity module. The
1. **Ishigami function**
- In addition to the Pick and Freeze scheme, the Sobol indices can be estimated using the rank statistics approach [2]_. We demonstrate this estimation of the Sobol indices using the Ishigami function.
+ In addition to the Pick and Freeze scheme, the Sobol indices can be estimated using the rank statistics approach :cite:`gamboa2020global`. We demonstrate this estimation of the Sobol indices using the Ishigami function.
2. **Exponential function**
- For the Exponential model, analytical Cramér-von Mises indices are available [1]_ and since they are equivalent to the Chatterjee indices in the sample limit, they are shown here.
+ For the Exponential model, analytical Cramér-von Mises indices are available :cite:`CVM` and, since the Chatterjee indices are equivalent to them in the large sample limit, they are shown here.
3. **Sobol function**
- This example was considered in [2]_ (page 18) to compare the Pick and Freeze scheme with the rank statistics approach for estimating the Sobol indices.
-
-.. [1] Gamboa, F., Klein, T., & Lagnoux, A. (2018). Sensitivity Analysis Based on Cramér-von Mises Distance. SIAM/ASA Journal on Uncertainty Quantification, 6(2), 522-548. doi:10.1137/15M1025621. (`Link `_)
-
-.. [2] Fabrice Gamboa, Pierre Gremaud, Thierry Klein, and Agnès Lagnoux. (2020). Global Sensitivity Analysis: a new generation of mighty estimators based on rank statistics.
+ This example was considered in :cite:`gamboa2020global` (page 18) to compare the Pick and Freeze scheme with the rank statistics approach for estimating the Sobol indices.
diff --git a/docs/source/bibliography.bib b/docs/source/bibliography.bib
index de41a858b..3d2c3a281 100644
--- a/docs/source/bibliography.bib
+++ b/docs/source/bibliography.bib
@@ -465,22 +465,6 @@ @article{Stretch2
pages={306–312}
}
-@article{Morris1,
-title = {An effective screening design for sensitivity analysis of large models},
-journal = {Environmental Modelling & Software},
-volume = {22},
-number = {10},
-pages = {1509-1518},
-year = {2007},
-note = {Modelling, computer-assisted simulations, and mapping of dangerous phenomena for hazard assessment},
-issn = {1364-8152},
-doi = {https://doi.org/10.1016/j.envsoft.2006.10.004},
-url = {https://www.sciencedirect.com/science/article/pii/S1364815206002805},
-author = {Francesca Campolongo and Jessica Cariboni and Andrea Saltelli},
-keywords = {Sensitivity analysis, Screening problem, Model-free methods, Effective sampling strategy, Dimethylsulphide (DMS)},
-abstract = {In 1991 Morris proposed an effective screening sensitivity measure to identify the few important factors in models with many factors. The method is based on computing for each input a number of incremental ratios, namely elementary effects, which are then averaged to assess the overall importance of the input. Despite its value, the method is still rarely used and instead local analyses varying one factor at a time around a baseline point are usually employed. In this piece of work we propose a revised version of the elementary effects method, improved in terms of both the definition of the measure and the sampling strategy. In the present form the method shares many of the positive qualities of the variance-based techniques, having the advantage of a lower computational cost, as demonstrated by the analytical examples. The method is employed to assess the sensitivity of a chemical reaction model for dimethylsulphide (DMS), a gas involved in climate change. Results of the sensitivity analysis open up the ground for model reconsideration: some model components may need a more thorough modelling effort while some others may need to be simplified.}
-}
-
@article{StochasticProcess1,
title = {Digital simulation of random processes and its applications},
journal = {Journal of Sound and Vibration},
@@ -744,3 +728,60 @@ @article{dsilva2018parsimonious
year={2018},
publisher={Elsevier}
}
+
+################ Sensitivity Analysis ########################
+
+# Morris
+@article{Morris1,
+title = {An effective screening design for sensitivity analysis of large models},
+journal = {Environmental Modelling & Software},
+volume = {22},
+number = {10},
+pages = {1509-1518},
+year = {2007},
+note = {Modelling, computer-assisted simulations, and mapping of dangerous phenomena for hazard assessment},
+issn = {1364-8152},
+doi = {https://doi.org/10.1016/j.envsoft.2006.10.004},
+url = {https://www.sciencedirect.com/science/article/pii/S1364815206002805},
+author = {Francesca Campolongo and Jessica Cariboni and Andrea Saltelli},
+keywords = {Sensitivity analysis, Screening problem, Model-free methods, Effective sampling strategy, Dimethylsulphide (DMS)},
+abstract = {In 1991 Morris proposed an effective screening sensitivity measure to identify the few important factors in models with many factors. The method is based on computing for each input a number of incremental ratios, namely elementary effects, which are then averaged to assess the overall importance of the input. Despite its value, the method is still rarely used and instead local analyses varying one factor at a time around a baseline point are usually employed. In this piece of work we propose a revised version of the elementary effects method, improved in terms of both the definition of the measure and the sampling strategy. In the present form the method shares many of the positive qualities of the variance-based techniques, having the advantage of a lower computational cost, as demonstrated by the analytical examples. The method is employed to assess the sensitivity of a chemical reaction model for dimethylsulphide (DMS), a gas involved in climate change. Results of the sensitivity analysis open up the ground for model reconsideration: some model components may need a more thorough modelling effort while some others may need to be simplified.}
+}
+
+# Chatterjee
+@article{Chatterjee,
+author = {Sourav Chatterjee},
+title = {A New Coefficient of Correlation},
+journal = {Journal of the American Statistical Association},
+volume = {116},
+number = {536},
+pages = {2009-2022},
+year = {2021},
+publisher = {Taylor & Francis},
+doi = {10.1080/01621459.2020.1758115},
+URL = {https://doi.org/10.1080/01621459.2020.1758115},
+eprint = {https://doi.org/10.1080/01621459.2020.1758115}
+}
+
+@misc{gamboa2020global,
+ title={Global Sensitivity Analysis: a new generation of mighty estimators based on rank statistics},
+ author={Fabrice Gamboa and Pierre Gremaud and Thierry Klein and Agnès Lagnoux},
+ year={2020},
+ eprint={2003.01772},
+ archivePrefix={arXiv},
+ primaryClass={math.ST}
+}
+
+# Cramér-von Mises index
+@article{CVM,
+author = {Gamboa, Fabrice and Klein, Thierry and Lagnoux, Agnès},
+title = {Sensitivity Analysis Based on Cramér--von Mises Distance},
+journal = {SIAM/ASA Journal on Uncertainty Quantification},
+volume = {6},
+number = {2},
+pages = {522-548},
+year = {2018},
+doi = {10.1137/15M1025621},
+URL = {https://doi.org/10.1137/15M1025621},
+eprint = {https://doi.org/10.1137/15M1025621},
+}
diff --git a/docs/source/sensitivity/chatterjee.rst b/docs/source/sensitivity/chatterjee.rst
index e2e8ce006..57921b4b2 100644
--- a/docs/source/sensitivity/chatterjee.rst
+++ b/docs/source/sensitivity/chatterjee.rst
@@ -1,9 +1,9 @@
Chatterjee indices
----------------------------------------
-The Chatterjee index measures the strength of the relationship between :math:`X` and :math:`Y` using rank statistics.
+The Chatterjee index measures the strength of the relationship between :math:`X` and :math:`Y` using rank statistics :cite:`Chatterjee`.
-Consider :math:`n` samples of random variables :math:`X` and :math:`Y`, with :math:`(X_{(1)}, Y_{(1)}), \ldots,(X_{(n)}, Y_{(n)})` such that :math:`X_{(1)} \leq \cdots \leq X_{(n)}`. Here, random variable :math:`X` can be one of the inputs of a model and :math:`Y` be the model response. If :math:`X_{i}`'s have no ties, there is a unique way of doing this (case of ties is also taken into account in the implementation, see [1]_). Let :math:`r_{i}`` be the rank of :math:`Y_{(i)}`, that is, the number of :math:`j` such that :math:`Y_{(j)} \leq Y_{(i)}`.The Chatterjee index :math:`\xi_{n}(X, Y)` is defined as:
+Consider :math:`n` samples of random variables :math:`X` and :math:`Y`, rearranged as :math:`(X_{(1)}, Y_{(1)}), \ldots,(X_{(n)}, Y_{(n)})` such that :math:`X_{(1)} \leq \cdots \leq X_{(n)}`. Here, the random variable :math:`X` can be one of the inputs of a model and :math:`Y` the model response. If the :math:`X_{i}`'s have no ties, there is a unique way of doing this (the case of ties is also taken into account in the implementation, see :cite:`Chatterjee`). Let :math:`r_{i}` be the rank of :math:`Y_{(i)}`, that is, the number of :math:`j` such that :math:`Y_{(j)} \leq Y_{(i)}`. The Chatterjee index :math:`\xi_{n}(X, Y)` is defined as:
.. math::
@@ -11,7 +11,6 @@ Consider :math:`n` samples of random variables :math:`X` and :math:`Y`, with :ma
The Chatterjee index converges for :math:`n \rightarrow \infty` to the Cramér-von Mises index and is faster to estimate than using the Pick and Freeze approach in the Cramér-von Mises index.
-.. [1] Sourav Chatterjee (2021) A New Coefficient of Correlation, Journal of the American Statistical Association, 116:536, 2009-2022, DOI: 10.1080/01621459.2020.1758115 (`Link `_)
Chatterjee Class
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -29,7 +28,7 @@ Attributes
""""""""""
.. autoattribute:: UQpy.sensitivity.Chatterjee.chatterjee_i
.. autoattribute:: UQpy.sensitivity.Chatterjee.sobol_i
-.. autoattribute:: UQpy.sensitivity.Chatterjee.CI_chatterjee_i
+.. autoattribute:: UQpy.sensitivity.Chatterjee.confidence_interval_chatterjee_i
.. autoattribute:: UQpy.sensitivity.Chatterjee.num_vars
.. autoattribute:: UQpy.sensitivity.Chatterjee.n_samples
diff --git a/src/UQpy/sensitivity/Chatterjee.py b/src/UQpy/sensitivity/Chatterjee.py
index a171e6247..53c470b5b 100644
--- a/src/UQpy/sensitivity/Chatterjee.py
+++ b/src/UQpy/sensitivity/Chatterjee.py
@@ -283,7 +283,7 @@ def rank_analog_to_pickfreeze(
):
r"""
Computing the :math:`N(j)` for each :math:`j \in \{1, \ldots, n\}`
- as in eq.(8) in [6]_, where :math:`n` is the size of :math:`X`.
+ as in eq.(8) in :cite:`gamboa2020global`, where :math:`n` is the size of :math:`X`.
.. math::
:nowrap:
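The rank-based definition added to chatterjee.rst above reduces, in the no-ties case, to a few lines of NumPy. A minimal sketch of the estimator xi_n (tie handling, as the rst notes, is left to the Chatterjee.py implementation):

    import numpy as np

    def chatterjee_xi(x, y):
        """Minimal no-ties estimator of the Chatterjee coefficient xi_n."""
        x = np.asarray(x).ravel()
        y = np.asarray(y).ravel()
        n = x.size
        y_sorted = y[np.argsort(x)]                    # pair up and sort by x
        ranks = np.argsort(np.argsort(y_sorted)) + 1   # r_i = #{j : y_(j) <= y_(i)} (no ties)
        return 1.0 - 3.0 * np.abs(np.diff(ranks)).sum() / (n ** 2 - 1)

    rng = np.random.default_rng(0)
    x = rng.uniform(size=1000)
    print(chatterjee_xi(x, np.sin(x)))   # close to 1 for a noiseless monotone relationship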
From e3f0d7a6661af28d59c9c2ed5a5abf265f17f37d Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 23 May 2022 15:23:05 +0200
Subject: [PATCH 43/59] Added references to bibliography for CVM
---
docs/code/sensitivity/cramer_von_mises/README.rst | 6 ++----
docs/source/sensitivity/cramer_von_mises.rst | 8 ++------
2 files changed, 4 insertions(+), 10 deletions(-)
diff --git a/docs/code/sensitivity/cramer_von_mises/README.rst b/docs/code/sensitivity/cramer_von_mises/README.rst
index b87758792..59036863e 100644
--- a/docs/code/sensitivity/cramer_von_mises/README.rst
+++ b/docs/code/sensitivity/cramer_von_mises/README.rst
@@ -4,10 +4,8 @@ These examples serve as a guide for using the Cramér-von Mises sensitivity modu
1. **Exponential function**
- For the Exponential model, analytical Cramér-von Mises indices are available [1]_.
+ For the Exponential model, analytical Cramér-von Mises indices are available :cite:`CVM`.
2. **Sobol function**
- The Cramér-von Mises indices are computed using the Pick and Freeze approach [1]_. These model evaluations can be used to estimate the Sobol indices as well. We demonstrate this using the Sobol function.
-
-.. [1] Gamboa, F., Klein, T., & Lagnoux, A. (2018). Sensitivity Analysis Based on Cramér-von Mises Distance. SIAM/ASA Journal on Uncertainty Quantification, 6(2), 522-548. doi:10.1137/15M1025621. (`Link `_)
\ No newline at end of file
+ The Cramér-von Mises indices are computed using the Pick and Freeze approach :cite:`CVM`. These model evaluations can be used to estimate the Sobol indices as well. We demonstrate this using the Sobol function.
diff --git a/docs/source/sensitivity/cramer_von_mises.rst b/docs/source/sensitivity/cramer_von_mises.rst
index 8ee05455a..22477cbc2 100644
--- a/docs/source/sensitivity/cramer_von_mises.rst
+++ b/docs/source/sensitivity/cramer_von_mises.rst
@@ -1,7 +1,7 @@
Cramér-von Mises indices
----------------------------------------
-A sensitivity index based on the Cramér-von Mises distance. In contrast to the variance based Sobol indices, it takes into account the whole distribution of the model output and is therefore considered as a moment-free method [1]_. Furthermore the index can be naturally extended to multivariate model outputs (not implemented yet in UQPy).
+A sensitivity index based on the Cramér-von Mises distance. In contrast to the variance-based Sobol indices, it takes into account the whole distribution of the model output and is therefore considered a moment-free method :cite:`CVM`. Furthermore, the index can be naturally extended to multivariate model outputs (not yet implemented in UQpy).
Consider a model :math:`Y=f(X): \mathbb{R}^d \rightarrow \mathbb{R}^k` with :math:`d` inputs :math:`X_{(1)}, X_{(2)}, \ldots, X_{(d)}` and :math:`k` outputs :math:`Y_{(1)}, Y_{(2)}, \ldots, Y_{(k)}`. We define the cumulative distribution function :math:`F(t)` of :math:`Y` as:
@@ -29,11 +29,7 @@ and the total Cramér-von Mises index :math:`S_{2, C V M}^{T o t, i}` (for input
S_{2, C V M}^{T o t, i}:=1-S_{2, C V M}^{\sim i}=1-\frac{\int_{\mathbb{R}^{k}} \mathbb{E}\left[\left(F(t)-F^{\sim i}(t)\right)^{2}\right] d F(t)}{\int_{\mathbb{R}^{k}} F(t)(1-F(t)) d F(t)}
-The above first and total order indices are estimated using the Pick-and-Freeze approach. This requires :math:`N(d+2)` model evaluations, where :math:`N` is the number of samples. (For implementation details, see also [2]_.)
-
-.. [1] Gamboa, F., Klein, T., & Lagnoux, A. (2018). Sensitivity Analysis Based on Cramér-von Mises Distance. SIAM/ASA Journal on Uncertainty Quantification, 6(2), 522-548. doi:10.1137/15M1025621. (`Link `_)
-
-.. [2] Gamboa, F., Gremaud, P., Klein, T., & Lagnoux, A. (2020). Global Sensitivity Analysis: a new generation of mighty estimators based on rank statistics. arXiv [math.ST]. (`Link `_)
+The above first and total order indices are estimated using the Pick-and-Freeze approach. This requires :math:`N(d+2)` model evaluations, where :math:`N` is the number of samples. (For implementation details, see also :cite:`gamboa2020global`.)
Cramér-von Mises Class
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
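As a concrete sample budget for the Pick-and-Freeze estimator described above: with N = 20,000 samples and d = 3 inputs, estimating the first and total Cramér-von Mises indices requires N(d + 2) = 20,000 × 5 = 100,000 model evaluations.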
From 168f7d3e389ce93de2fcdd2ea8927603d00c8deb Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 23 May 2022 15:30:31 +0200
Subject: [PATCH 44/59] Added references to bibliography for GSI
---
.../sensitivity/generalised_sobol/README.rst | 6 ++----
docs/source/bibliography.bib | 16 ++++++++++++++++
docs/source/sensitivity/generalised_sobol.rst | 5 +----
3 files changed, 19 insertions(+), 8 deletions(-)
diff --git a/docs/code/sensitivity/generalised_sobol/README.rst b/docs/code/sensitivity/generalised_sobol/README.rst
index 78ede7984..44406106e 100644
--- a/docs/code/sensitivity/generalised_sobol/README.rst
+++ b/docs/code/sensitivity/generalised_sobol/README.rst
@@ -5,10 +5,8 @@ These examples serve as a guide for using the GSI sensitivity module. They have
1. **Mechanical oscillator ODE**
- The GSI sensitivity indices are computed for a mechanical oscillator governed by a second-order differential equation [1]_. The model outputs the displacement of the oscillator for a given time period. Unlike the pointwise-in-time Sobol indices, which provide the sensitivity of the model parameters at each point in time, the GSI indices summarise the sensitivities of the model parameters over the entire time period.
+ The GSI sensitivity indices are computed for a mechanical oscillator governed by a second-order differential equation :cite:`GSI`. The model outputs the displacement of the oscillator for a given time period. Unlike the pointwise-in-time Sobol indices, which provide the sensitivity of the model parameters at each point in time, the GSI indices summarise the sensitivities of the model parameters over the entire time period.
2. **Toy example**
- The GSI sensitivity indices are computed for a toy model whose analytical solution is given in [1]_.
-
-.. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others. Sensitivity analysis for multidimensional and functional outputs. Electronic journal of statistics 2014; 8(1): 575-603.
\ No newline at end of file
+ The GSI sensitivity indices are computed for a toy model whose analytical solution is given in :cite:`GSI`.
diff --git a/docs/source/bibliography.bib b/docs/source/bibliography.bib
index 3d2c3a281..7bd9acd0b 100644
--- a/docs/source/bibliography.bib
+++ b/docs/source/bibliography.bib
@@ -785,3 +785,19 @@ @article{CVM
URL = {https://doi.org/10.1137/15M1025621},
eprint = {https://doi.org/10.1137/15M1025621},
}
+
+
+# Generalised Sobol index
+@article{GSI,
+author = {Fabrice Gamboa and Alexandre Janon and Thierry Klein and Agnès Lagnoux},
+title = {{Sensitivity analysis for multidimensional and functional outputs}},
+volume = {8},
+journal = {Electronic Journal of Statistics},
+number = {1},
+publisher = {Institute of Mathematical Statistics and Bernoulli Society},
+pages = {575 -- 603},
+keywords = {Concentration inequalities, quadratic functionals, Semi-parametric efficient estimation, sensitivity analysis, Sobol indices, temporal output, vector output},
+year = {2014},
+doi = {10.1214/14-EJS895},
+URL = {https://doi.org/10.1214/14-EJS895}
+}
diff --git a/docs/source/sensitivity/generalised_sobol.rst b/docs/source/sensitivity/generalised_sobol.rst
index 402b3190e..1fcb5fd5a 100644
--- a/docs/source/sensitivity/generalised_sobol.rst
+++ b/docs/source/sensitivity/generalised_sobol.rst
@@ -1,7 +1,7 @@
Generalised Sobol indices
----------------------------------------
-A natural generalization of the Sobol indices (that are classically defined for single-output models) for multi-output models. The generalised Sobol indices are computed using the Pick-and-Freeze approach. (For implementation details, see also [1]_.)
+A natural generalization of the Sobol indices (that are classically defined for single-output models) for multi-output models. The generalised Sobol indices are computed using the Pick-and-Freeze approach. (For implementation details, see also :cite:`GSI`.)
Consider a model :math:`Y=f(X): \mathbb{R}^d \rightarrow \mathbb{R}^k` with :math:`d` inputs :math:`X=\left[ X_{1}, X_{2},…,X_{d} \right]` and :math:`k` outputs :math:`Y=\left[ Y_{1}, Y_{2},…,Y_{k} \right]`.
@@ -47,9 +47,6 @@ and
\Sigma_{N}=\frac{1}{N} \sum_{j=1}^{N} \frac{Y_{j} Y_{j}^{t}+Y_{j}^{\mathbf{i}}\left(Y_{j}^{\mathbf{i}}\right)^{t}}{2}-\left(\frac{1}{N} \sum_{j=1}^{N} \frac{Y_{j}+Y_{j}^{\mathbf{i}}}{2}\right)\left(\frac{1}{N} \sum_{j=1}^{N} \frac{Y_{j}+Y_{j}^{\mathbf{i}}}{2}\right)^{t}
-.. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others. Sensitivity analysis for multidimensional and functional outputs. Electronic journal of statistics 2014; 8(1): 575-603.(`Link `_)
-
-
Generalised Sobol Class
^^^^^^^^^^^^^^^^^^^^^^^^^^
From dd6393f5155fafffdb5f2337090ac97bd5e22a40 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 23 May 2022 15:38:25 +0200
Subject: [PATCH 45/59] Added references to bibliography for Sobol
---
docs/source/bibliography.bib | 27 +++++++++++++++++++++++++++
docs/source/sensitivity/sobol.rst | 15 +++++----------
2 files changed, 32 insertions(+), 10 deletions(-)
diff --git a/docs/source/bibliography.bib b/docs/source/bibliography.bib
index 7bd9acd0b..f46fc199a 100644
--- a/docs/source/bibliography.bib
+++ b/docs/source/bibliography.bib
@@ -801,3 +801,30 @@ @article{GSI
doi = {10.1214/14-EJS895},
URL = {https://doi.org/10.1214/14-EJS895}
}
+
+# Sobol
+@book{saltelli_2008,
+ author = {Saltelli, A.},
+ description = {Global sensitivity analysis: the primer - Andrea Saltelli},
+ isbn = {9780470059975},
+ keywords = {sensitivity statistics},
+ lccn = {2007045551},
+ publisher = {John Wiley},
+ title = {Global sensitivity analysis: the primer},
+ url = {https://onlinelibrary.wiley.com/doi/book/10.1002/9780470725184},
+ year = 2008
+}
+
+@article{saltelli_2002,
+title = {Making best use of model evaluations to compute sensitivity indices},
+journal = {Computer Physics Communications},
+volume = {145},
+number = {2},
+pages = {280-297},
+year = {2002},
+issn = {0010-4655},
+doi = {https://doi.org/10.1016/S0010-4655(02)00280-1},
+url = {https://www.sciencedirect.com/science/article/pii/S0010465502002801},
+author = {Andrea Saltelli},
+keywords = {Sensitivity analysis, Sensitivity measures, Sensitivity indices, Importance measures},
+}
\ No newline at end of file
diff --git a/docs/source/sensitivity/sobol.rst b/docs/source/sensitivity/sobol.rst
index fee11e54e..60469b28c 100644
--- a/docs/source/sensitivity/sobol.rst
+++ b/docs/source/sensitivity/sobol.rst
@@ -31,7 +31,7 @@ Here, :math:`N` is the number of Monte Carlo samples and :math:`m` being the num
Compared to "Sobol1993", the "Janon2014" estimator makes more efficient use of model evaluations and produces smaller (better) confidence intervals.
-- Sobol1993: Requires :math:`N(m + 1)` model evaluations [1]_.
+- Sobol1993: Requires :math:`N(m + 1)` model evaluations :cite:`saltelli_2008`.
.. math::
S_{i} = \frac{\mathbb{V}\left[E\left(Y \mid X_{i}\right)\right]}{\mathbb{V}(Y)} = \frac{ (1/N) Y_{A} \cdot Y_{C_{i}}-f_{0}^{2}}{(1 / N) Y_{A} \cdot Y_{A}-f_{0}^{2}}
@@ -39,15 +39,15 @@ Compared to "Sobol1993", the "Janon2014" estimator makes more efficient use of m
.. math::
y_{A}=f(A), \quad y_{C_{i}}=f(C_{i}), \quad f_{0}^{2}=\left(\frac{1}{N} \sum_{j=1}^{N} y_{A}^{(j)} \right)^{2}
-- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [2]_.
+- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations :cite:`saltelli_2002`.
2. **Second order indices** (:math:`S_{ij}`)
-- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [2]_.
+- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations :cite:`saltelli_2002`.
3. **Total order indices** (:math:`S_{T_{i}}`)
-- Homma1996: Requires :math:`N(m + 1)` model evaluations [1]_.
+- Homma1996: Requires :math:`N(m + 1)` model evaluations :cite:`saltelli_2008`.
.. math::
S_{T_{i}} = 1 - \frac{\mathbb{V}\left[E\left(Y \mid \mathbf{X}_{\sim_{i}}\right)\right]}{\mathbb{V}(Y)} = 1 - \frac{ (1 / N) Y_{B} \cdot Y_{C_{i}}-f_{0}^{2}}{(1 / N) Y_{A} \cdot Y_{A}-f_{0}^{2}}
@@ -55,14 +55,9 @@ Compared to "Sobol1993", the "Janon2014" estimator makes more efficient use of m
.. math::
y_{A}=f(A), \quad y_{B}=f(B), \quad y_{C_{i}}=f(C_{i}), \quad f_{0}^{2}=\left(\frac{1}{2N} \sum_{j=1}^{N} y_{A}^{(j)} + y_{B}^{(j)} \right)^{2}
-- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [2]_.
+- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations :cite:`saltelli_2002`.
-.. [1] Saltelli, A. (2008). Global sensitivity analysis: the primer.
- John Wiley. ISBN: 9780470059975
-
-.. [2] Saltelli, A. (2002). Making best use of model evaluations to compute sensitivity indices. (`Link `_)
-
Sobol Class
^^^^^^^^^^^^^^^^^^
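The estimator formulas listed in sobol.rst above map directly onto a few lines of NumPy. A minimal sketch, assuming y_A = f(A), y_B = f(B) and y_C_i = f(C_i) are 1D arrays of N single-output model evaluations:

    import numpy as np

    def first_order_sobol1993(y_A, y_C_i):
        # S_i = ((1/N) y_A . y_C_i - f0^2) / ((1/N) y_A . y_A - f0^2)
        N = y_A.size
        f0_sq = y_A.mean() ** 2
        return (y_A @ y_C_i / N - f0_sq) / (y_A @ y_A / N - f0_sq)

    def total_order_homma1996(y_A, y_B, y_C_i):
        # S_Ti = 1 - ((1/N) y_B . y_C_i - f0^2) / ((1/N) y_A . y_A - f0^2),
        # with f0^2 = ((1/(2N)) sum_j (y_A^(j) + y_B^(j)))^2
        N = y_A.size
        f0_sq = ((y_A + y_B).sum() / (2 * N)) ** 2
        return 1.0 - (y_B @ y_C_i / N - f0_sq) / (y_A @ y_A / N - f0_sq)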
From 08d954fcaed264a414c5f946d9788ac0145c374a Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 6 Jun 2022 16:45:37 +0200
Subject: [PATCH 46/59] Added PostProcess module for sensitivity indices.
For postprocessing sensitivity studies.
Currently supports bar plots for the indices.
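A minimal usage sketch of the new plotting helper (the index values below are made up; in practice they come from e.g. Sobol.run(), and extra keyword arguments are forwarded to matplotlib.pyplot.bar):

    import numpy as np
    from UQpy.sensitivity.PostProcess import plot_sensitivity_index

    # Illustrative first order indices and bootstrap confidence intervals.
    indices = np.array([0.55, 0.30, 0.10])
    conf_int = np.array([[0.50, 0.60], [0.25, 0.35], [0.05, 0.15]])

    fig, ax = plot_sensitivity_index(
        indices,
        confidence_interval=conf_int,
        plot_title="First order Sobol indices",
        color="C0",  # forwarded to matplotlib.pyplot.bar
    )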
---
src/UQpy/sensitivity/PostProcess.py | 322 ++++++++++++++++++++++++++++
1 file changed, 322 insertions(+)
create mode 100644 src/UQpy/sensitivity/PostProcess.py
diff --git a/src/UQpy/sensitivity/PostProcess.py b/src/UQpy/sensitivity/PostProcess.py
new file mode 100644
index 000000000..77e17bfde
--- /dev/null
+++ b/src/UQpy/sensitivity/PostProcess.py
@@ -0,0 +1,322 @@
+"""
+This module is used to post-process the sensitivity analysis results. Currently it
+supports plotting the sensitivity indices and comparing two sets of indices
+(such as first order vs. total order indices) using the following two methods:
+
+ 1. plot_sensitivity_index
+ 2. plot_index_comparison
+
+"""
+
+import math
+import itertools
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+
+def plot_sensitivity_index(
+ indices,
+ confidence_interval=None,
+ plot_title="Sensitivity index",
+ variable_names=None,
+ **kwargs,
+):
+
+ """
+
+ This function plots the sensitivity indices (with confidence intervals)
+ in a bar plot.
+
+ **Inputs:**
+
+ * **indices** (list or ndarray):
+ list/array of sensitivity indices
+ Shape: (num_vars)
+
+ * **confidence_interval** (list or ndarray):
+ list/array of confidence interval for the sensitivity indices.
+ Shape: (num_vars, 2)
+
+ * **plot_title** (str):
+ Title of the plot
+ Default: "Sensitivity index"
+
+ * **variable_names** (list):
+ List of variable names
+ Default: [r"$X_{}$".format(i) for i in range(num_vars)]
+
+    * **kwargs** (dict):
+ Keyword arguments for the plot to be passed to matplotlib.pyplot.bar
+
+ """
+
+ num_vars = len(indices)
+
+ if variable_names is None:
+ variable_names = [r"$X_{}$".format(i + 1) for i in range(num_vars)]
+
+ # Check if confidence intervals are available
+ if confidence_interval is not None:
+ conf_int_flag = True
+ error = confidence_interval[:, 1] - indices
+ else:
+ conf_int_flag = False
+
+ # x and y data
+ _idx = np.arange(num_vars)
+
+ indices = np.around(indices, decimals=2) # round to 2 decimal places
+
+ # Plot one index
+ fig, ax = plt.subplots()
+ width = 0.3
+ ax.spines["top"].set_visible(False)
+ ax.spines["right"].set_visible(False)
+
+ index_bar = ax.bar(
+ _idx, # x-axis
+ indices, # y-axis
+ width=width, # bar width
+ yerr=error if conf_int_flag else None, # error bars
+ ecolor="k", # error bar color
+ capsize=5, # error bar cap size in pt
+ **kwargs,
+ )
+
+ ax.bar_label(index_bar, label_type="edge", fontsize=10)
+ ax.set_xticks(_idx, variable_names)
+ ax.set_xlabel("Model inputs")
+ ax.set_ylim(top=1) # set only upper limit of y to 1
+ ax.set_title(plot_title)
+
+ plt.show()
+
+ return fig, ax
+
+
+def plot_index_comparison(
+ indices_1,
+ indices_2,
+ confidence_interval_1=None,
+ confidence_interval_2=None,
+ label_1=None,
+ label_2=None,
+ plot_title="Sensitivity index",
+ variable_names=None,
+ **kwargs,
+):
+
+ """
+
+ This function plots two sensitivity indices (with confidence intervals)
+ in a bar plot for comparison.
+ For example:
+ first order Sobol indices and total order Sobol indices
+ OR
+ first order Sobol indices and Chatterjee indices.
+
+ **Inputs:**
+
+ * **indices_1** (list or ndarray):
+ list/array of sensitivity indices
+ Shape: (num_vars)
+
+ * **indices_2** (list or ndarray):
+ list/array of sensitivity indices
+ Shape: (num_vars)
+
+ * **confidence_interval_1** (list or ndarray):
+ list/array of confidence interval for the sensitivity indices.
+ Shape: (num_vars, 2)
+ Default: None
+
+ * **confidence_interval_2** (list or ndarray):
+        list/array of confidence intervals for the sensitivity indices.
+ Shape: (num_vars, 2)
+ Default: None
+
+ * **plot_title** (str):
+ Title of the plot
+
+ * **variable_names** (list):
+ List of variable names
+ Default: [r"$X_{}$".format(i) for i in range(num_vars)]
+
+    * **kwargs** (dict):
+ Keyword arguments for the plot to be passed to matplotlib.pyplot.bar
+
+ """
+
+    if indices_1 is None or indices_2 is None:
+        raise ValueError("Please provide two sets of indices to plot")
+
+ if len(indices_1) != len(indices_2):
+ raise ValueError("indices_1 and indices_2 should have the same length")
+
+ num_vars = len(indices_1)
+
+ if variable_names is None:
+ variable_names = [r"$X_{}$".format(i + 1) for i in range(num_vars)]
+
+ # Check if confidence intervals are available
+ if confidence_interval_1 is not None:
+ conf_int_flag_1 = True
+ error_1 = confidence_interval_1[:, 1] - indices_1
+ else:
+ conf_int_flag_1 = False
+
+ if confidence_interval_2 is not None:
+ conf_int_flag_2 = True
+ error_2 = confidence_interval_2[:, 1] - indices_2
+ else:
+ conf_int_flag_2 = False
+
+ # x and y data
+ _idx = np.arange(num_vars)
+
+ indices_1 = np.around(indices_1, decimals=2) # round to 2 decimal places
+
+ if indices_2 is not None:
+ indices_2 = np.around(indices_2, decimals=2) # round to 2 decimal places
+
+ # Plot two indices side by side
+ fig, ax = plt.subplots()
+ width = 0.3
+ ax.spines["top"].set_visible(False)
+ ax.spines["right"].set_visible(False)
+
+ bar_indices_1 = ax.bar(
+ _idx - width / 2, # x-axis
+ indices_1, # y-axis
+ width=width, # bar width
+ color="C0", # bar color
+ # alpha=0.5, # bar transparency
+ label=label_1, # bar label
+ yerr=error_1 if conf_int_flag_1 else None,
+ ecolor="k", # error bar color
+ capsize=5, # error bar cap size in pt
+ )
+
+ bar_indices_2 = ax.bar(
+ _idx + width / 2, # x-axis
+ indices_2, # y-axis
+ width=width, # bar width
+ color="C1", # bar color
+ # alpha=0.5, # bar transparency
+ label=label_2, # bar label
+ yerr=error_2 if conf_int_flag_2 else None,
+ ecolor="k", # error bar color
+ capsize=5, # error bar cap size in pt
+ )
+
+ ax.bar_label(bar_indices_1, label_type="edge", fontsize=10)
+ ax.bar_label(bar_indices_2, label_type="edge", fontsize=10)
+ ax.set_xticks(_idx, variable_names)
+ ax.set_xlabel("Model inputs")
+ ax.set_title(plot_title)
+ ax.set_ylim(top=1) # set only upper limit of y to 1
+ ax.legend()
+
+ plt.show()
+
+ return fig, ax
+
+
+def plot_second_order_indices(
+ indices,
+ num_vars,
+ confidence_interval=None,
+ plot_title="Second order indices",
+ variable_names=None,
+ **kwargs,
+):
+
+ """
+
+ This function plots second order indices (with confidence intervals)
+ in a bar plot.
+
+ **Inputs:**
+
+ * **indices** (list or ndarray):
+        list/array of second order indices
+        Shape: (num_second_order_terms)
+
+    * **confidence_interval** (list or ndarray):
+        list/array of confidence intervals for the second order indices.
+        Shape: (num_second_order_terms, 2)
+
+ * **plot_title** (str):
+ Title of the plot
+
+ * **variable_names** (list):
+ List of variable names
+        Default: [r"$X_{}$".format(i) for i in range(num_vars)]
+        (Assumes that the indices are in lexicographic order of the variable pairs.)
+
+    * **kwargs** (dict):
+ Keyword arguments for the plot to be passed to matplotlib.pyplot.bar
+
+ """
+
+ num_second_order_terms = len(indices)
+
+ if variable_names is None:
+ variable_names = [r"$X_{}$".format(i + 1) for i in range(num_vars)]
+
+ # All combinations of variables
+ all_combs = list(itertools.combinations(variable_names, 2))
+
+    # Join each pair of variable names to create the x-axis tick labels
+ all_combs_list = [" ".join(comb) for comb in all_combs]
+
+ # Check if confidence intervals are available
+ if confidence_interval is not None:
+ conf_int_flag = True
+ error = confidence_interval[:, 1] - indices
+ else:
+ conf_int_flag = False
+
+ # x and y data
+ _idx = np.arange(num_second_order_terms)
+
+ indices = np.around(indices, decimals=2) # round to 2 decimal places
+
+ # Plot one index
+ fig, ax = plt.subplots()
+ width = 0.3
+ ax.spines["top"].set_visible(False)
+ ax.spines["right"].set_visible(False)
+
+ index_bar = ax.bar(
+ _idx, # x-axis
+ indices, # y-axis
+ width=width, # bar width
+ yerr=error if conf_int_flag else None, # error bars
+ ecolor="k", # error bar color
+ capsize=5, # error bar cap size in pt
+ **kwargs,
+ )
+
+ ax.bar_label(index_bar, label_type="edge", fontsize=10)
+
+ ax.set_xticks(_idx, all_combs_list)
+ # generally, there are many second order terms
+ # so we need to make sure that the labels are
+ # not overlapping. We do this by rotating the labels
+ plt.setp(
+ ax.get_xticklabels(),
+ rotation=30,
+ horizontalalignment="right",
+ )
+ ax.set_xlabel("Model inputs")
+ ax.set_ylim(top=1) # set only upper limit of y to 1
+ ax.set_title(plot_title)
+
+ plt.show()
+
+ return fig, ax
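
# A minimal usage sketch for the helpers above (the index values and confidence
# intervals are hypothetical; the __main__ guard keeps plotting out of module import):
if __name__ == "__main__":
    # Hypothetical first order indices and (lower, upper) confidence bounds for 3 inputs
    example_indices = np.array([0.55, 0.30, 0.05])
    example_conf_int = np.array([[0.50, 0.60], [0.27, 0.33], [0.03, 0.07]])

    plot_sensitivity_index(
        example_indices,
        confidence_interval=example_conf_int,
        plot_title="First order Sobol indices",
        color="C0",
    )

    # Second order indices are expected in lexicographic order of the variable pairs,
    # i.e. (X1, X2), (X1, X3), (X2, X3) for num_vars = 3
    example_second_order = np.array([0.04, 0.02, 0.01])
    plot_second_order_indices(example_second_order, num_vars=3)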
From d11e3955b0fc75166a39f6e87698a2e281304504 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 6 Jun 2022 16:46:03 +0200
Subject: [PATCH 47/59] Added plots in Sobol examples
---
.../sensitivity/sobol/plot_sobol_additive.py | 15 ++++++++++
.../code/sensitivity/sobol/plot_sobol_func.py | 28 +++++++++++++++++++
.../sensitivity/sobol/plot_sobol_ishigami.py | 26 +++++++++++++++++
3 files changed, 69 insertions(+)
diff --git a/docs/code/sensitivity/sobol/plot_sobol_additive.py b/docs/code/sensitivity/sobol/plot_sobol_additive.py
index 973e97dd6..51ee993c0 100644
--- a/docs/code/sensitivity/sobol/plot_sobol_additive.py
+++ b/docs/code/sensitivity/sobol/plot_sobol_additive.py
@@ -9,11 +9,16 @@
"""
# %%
+import numpy as np
+
from UQpy.run_model.RunModel import RunModel
from UQpy.run_model.model_execution.PythonModel import PythonModel
from UQpy.distributions import Normal
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.Sobol import Sobol
+from UQpy.sensitivity.PostProcess import *
+
+np.random.seed(123)
# %% [markdown]
# **Define the model and input distributions**
@@ -56,3 +61,13 @@
# %%
computed_indices["sobol_i"]
+
+# %%
+# **Plot the first and total order sensitivity indices**
+fig1, ax1 = plot_index_comparison(
+ computed_indices["sobol_i"][:, 0],
+ computed_indices["sobol_total_i"][:, 0],
+ label_1="First order Sobol indices",
+ label_2="Total order Sobol indices",
+ plot_title="First and Total order Sobol indices",
+)
diff --git a/docs/code/sensitivity/sobol/plot_sobol_func.py b/docs/code/sensitivity/sobol/plot_sobol_func.py
index 5a5cb9389..7c6058831 100644
--- a/docs/code/sensitivity/sobol/plot_sobol_func.py
+++ b/docs/code/sensitivity/sobol/plot_sobol_func.py
@@ -26,6 +26,9 @@
from UQpy.distributions import Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.Sobol import Sobol
+from UQpy.sensitivity.PostProcess import *
+
+np.random.seed(123)
# %% [markdown]
# **Define the model and input distributions**
@@ -76,6 +79,14 @@
# %%
computed_indices["sobol_i"]
+# %%
+# **Plot the first order sensitivity indices**
+fig1, ax1 = plot_sensitivity_index(
+ computed_indices["sobol_i"][:, 0],
+ plot_title="First order Sobol indices",
+ color="C0",
+)
+
# %% [markdown]
# **Total order Sobol indices**
#
@@ -97,6 +108,16 @@
# %%
computed_indices["sobol_total_i"]
+# %%
+# **Plot the first and total order sensitivity indices**
+fig2, ax2 = plot_index_comparison(
+ computed_indices["sobol_i"][:, 0],
+ computed_indices["sobol_total_i"][:, 0],
+ label_1="First order Sobol indices",
+ label_2="Total order Sobol indices",
+ plot_title="First and Total order Sobol indices",
+)
+
# %% [markdown]
# **Second order Sobol indices**
#
@@ -134,3 +155,10 @@
# %%
computed_indices["sobol_ij"]
+
+# %%
+# **Plot the second order sensitivity indices**
+fig3, ax3 = plot_second_order_indices(
+ computed_indices["sobol_ij"][:, 0],
+ num_vars=num_vars,
+)
diff --git a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py
index dc1ce0c62..b63420290 100644
--- a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py
+++ b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py
@@ -46,6 +46,9 @@
from UQpy.distributions import Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.Sobol import Sobol
+from UQpy.sensitivity.PostProcess import *
+
+np.random.seed(123)
# %% [markdown]
# **Define the model and input distributions**
@@ -112,3 +115,26 @@
# %%
computed_indices["confidence_interval_sobol_total_i"]
+
+# %%
+# **Plot the first order sensitivity indices**
+fig1, ax1 = plot_sensitivity_index(
+ computed_indices["sobol_i"][:, 0],
+ confidence_interval=computed_indices["confidence_interval_sobol_i"],
+ plot_title="First order Sobol indices",
+ variable_names=["$X_1$", "$X_2$", "$X_3$"],
+ color="C0",
+)
+
+# %%
+# **Plot the first and total order sensitivity indices**
+fig2, ax2 = plot_index_comparison(
+ computed_indices["sobol_i"][:, 0],
+ computed_indices["sobol_total_i"][:, 0],
+ confidence_interval_1=computed_indices["confidence_interval_sobol_i"],
+ confidence_interval_2=computed_indices["confidence_interval_sobol_total_i"],
+ label_1="First order Sobol indices",
+ label_2="Total order Sobol indices",
+ plot_title="First and Total order Sobol indices",
+ variable_names=["$X_1$", "$X_2$", "$X_3$"],
+)
From bb14e82c39af7f845df2c37d94f99687fb6728f9 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 6 Jun 2022 16:46:18 +0200
Subject: [PATCH 48/59] Added plots in Generalised Sobol examples
---
...ralised_sobol_mechanical_oscillator_ODE.py | 21 +++++++++
.../plot_generalised_sobol_multioutput.py | 45 ++++++++++++++++++-
2 files changed, 65 insertions(+), 1 deletion(-)
diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py
index 716c498f7..dabc3dfe9 100644
--- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py
+++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py
@@ -30,6 +30,9 @@
from UQpy.distributions import Uniform, Normal
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.GeneralisedSobol import GeneralisedSobol
+from UQpy.sensitivity.PostProcess import *
+
+np.random.seed(123)
# %% [markdown]
# **Define the model and input distributions**
@@ -75,8 +78,26 @@
# %%
computed_indices["gen_sobol_i"]
+# **Plot the first order sensitivity indices**
+fig1, ax1 = plot_sensitivity_index(
+ computed_indices["gen_sobol_i"][:, 0],
+ plot_title="First order Generalised Sobol indices",
+ variable_names=[r"$m$", "$c$", "$k$", "$\ell$"],
+ color="C0",
+)
+
# %% [markdown]
# **Total order Generalised Sobol indices**
# %%
computed_indices["gen_sobol_total_i"]
+
+# **Plot the first and total order sensitivity indices**
+fig2, ax2 = plot_index_comparison(
+ computed_indices["gen_sobol_i"][:, 0],
+ computed_indices["gen_sobol_total_i"][:, 0],
+ label_1="First order",
+ label_2="Total order",
+ plot_title="First and Total order Generalised Sobol indices",
+ variable_names=[r"$m$", "$c$", "$k$", "$\ell$"],
+)
diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py
index af4ca6ff3..0a2a7529c 100644
--- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py
+++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py
@@ -23,6 +23,9 @@
from UQpy.distributions import Uniform, Normal
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.GeneralisedSobol import GeneralisedSobol
+from UQpy.sensitivity.PostProcess import *
+
+np.random.seed(123)
# %% [markdown]
# **Define the model and input distributions**
@@ -64,9 +67,28 @@
# %%
computed_indices["gen_sobol_i"]
+# **Plot the first order sensitivity indices**
+fig1, ax1 = plot_sensitivity_index(
+ computed_indices["gen_sobol_i"][:, 0],
+ confidence_interval=computed_indices["confidence_interval_gen_sobol_i"],
+ plot_title="First order Generalised Sobol indices",
+ color="C0",
+)
+
# %%
computed_indices["gen_sobol_total_i"]
+# **Plot the first and total order sensitivity indices**
+fig2, ax2 = plot_index_comparison(
+ computed_indices["gen_sobol_i"][:, 0],
+ computed_indices["gen_sobol_total_i"][:, 0],
+ confidence_interval_1=computed_indices["confidence_interval_gen_sobol_i"],
+ confidence_interval_2=computed_indices["confidence_interval_gen_sobol_total_i"],
+ label_1="First order",
+ label_2="Total order",
+ plot_title="First and Total order Generalised Sobol indices",
+)
+
# %% [markdown]
# **Compute generalised Sobol indices**
@@ -75,7 +97,9 @@
SA = GeneralisedSobol(runmodel_obj, dist_object_2)
-computed_indices = SA.run(n_samples=100_000)
+computed_indices = SA.run(
+    n_samples=20_000, confidence_level=0.95, num_bootstrap_samples=500
+)
# %% [markdown]
# **First order Generalised Sobol indices**
@@ -91,5 +115,24 @@
# %%
computed_indices["gen_sobol_i"]
+# **Plot the first order sensitivity indices**
+fig3, ax3 = plot_sensitivity_index(
+ computed_indices["gen_sobol_i"][:, 0],
+ confidence_interval=computed_indices["confidence_interval_gen_sobol_i"],
+ plot_title="First order Generalised Sobol indices",
+ color="C0",
+)
+
# %%
computed_indices["gen_sobol_total_i"]
+
+# **Plot the first and total order sensitivity indices**
+fig4, ax4 = plot_index_comparison(
+ computed_indices["gen_sobol_i"][:, 0],
+ computed_indices["gen_sobol_total_i"][:, 0],
+ confidence_interval_1=computed_indices["confidence_interval_gen_sobol_i"],
+ confidence_interval_2=computed_indices["confidence_interval_gen_sobol_total_i"],
+ label_1="First order",
+ label_2="Total order",
+ plot_title="First and Total order Generalised Sobol indices",
+)
From ef52484b831764408d5cbeffb21eaa748f3a6e71 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 6 Jun 2022 16:46:38 +0200
Subject: [PATCH 49/59] Added plots in CVM index examples
---
.../cramer_von_mises/plot_cvm_exponential.py | 26 +++++++++++++++++++
.../cramer_von_mises/plot_cvm_sobol_func.py | 19 +++++++++++++-
2 files changed, 44 insertions(+), 1 deletion(-)
diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py
index 28b390a47..589166732 100644
--- a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py
+++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py
@@ -21,6 +21,9 @@
from UQpy.distributions import Normal
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.CramervonMises import CramervonMises as cvm
+from UQpy.sensitivity.PostProcess import *
+
+np.random.seed(123)
# %% [markdown]
# **Define the model and input distributions**
@@ -59,6 +62,13 @@
# %%
computed_indices["CVM_i"]
+# **Plot the CVM indices**
+fig1, ax1 = plot_sensitivity_index(
+ computed_indices["CVM_i"][:, 0],
+ plot_title="Cramér-von Mises indices",
+ color="C4",
+)
+
# %% [markdown]
# **Estimated first order Sobol indices**
#
@@ -71,8 +81,24 @@
# %%
computed_indices["sobol_i"]
+# **Plot the first order Sobol indices**
+fig2, ax2 = plot_sensitivity_index(
+ computed_indices["sobol_i"][:, 0],
+ plot_title="First order Sobol indices",
+ color="C0",
+)
+
# %% [markdown]
# **Estimated total order Sobol indices**
# %%
computed_indices["sobol_total_i"]
+
+# **Plot the first and total order sensitivity indices**
+fig3, ax3 = plot_index_comparison(
+ computed_indices["sobol_i"][:, 0],
+ computed_indices["sobol_total_i"][:, 0],
+ label_1="First order Sobol indices",
+ label_2="Total order Sobol indices",
+ plot_title="First and Total order Sobol indices",
+)
diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py
index 7500c7259..443073593 100644
--- a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py
+++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py
@@ -26,6 +26,9 @@
from UQpy.distributions import Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.CramervonMises import CramervonMises as cvm
+from UQpy.sensitivity.PostProcess import *
+
+np.random.seed(123)
# %% [markdown]
# **Define the model and input distributions**
@@ -54,7 +57,7 @@
SA = cvm(runmodel_obj, dist_object)
# Compute Sobol indices using the pick and freeze algorithm
-computed_indices = SA.run(n_samples=20_000, estimate_sobol_indices=True)
+computed_indices = SA.run(n_samples=50_000, estimate_sobol_indices=True)
# %% [markdown]
# **Cramér-von Mises indices**
@@ -62,6 +65,13 @@
# %%
computed_indices["CVM_i"]
+# **Plot the CVM indices**
+fig1, ax1 = plot_sensitivity_index(
+ computed_indices["CVM_i"][:, 0],
+ plot_title="Cramér-von Mises indices",
+ color="C4",
+)
+
# %% [markdown]
# **Estimated Sobol indices**
#
@@ -81,3 +91,10 @@
# %%
computed_indices["sobol_i"]
+
+# **Plot the first order Sobol indices**
+fig2, ax2 = plot_sensitivity_index(
+ computed_indices["sobol_i"][:, 0],
+ plot_title="First order Sobol indices",
+ color="C0",
+)
From 564086bc89d8e1e81505509c3f697819bc91f9ea Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 6 Jun 2022 16:46:56 +0200
Subject: [PATCH 50/59] Added plots in Chatterjee index examples
---
.../chatterjee/plot_chatterjee_exponential.py | 10 ++++++++++
.../chatterjee/plot_chatterjee_ishigami.py | 18 ++++++++++++++++++
.../chatterjee/plot_chatterjee_sobol_func.py | 17 +++++++++++++++++
3 files changed, 45 insertions(+)
diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py
index 8fa879847..0c67c7452 100644
--- a/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py
+++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py
@@ -22,6 +22,9 @@
from UQpy.distributions import Normal
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.Chatterjee import Chatterjee
+from UQpy.sensitivity.PostProcess import *
+
+np.random.seed(123)
# %% [markdown]
# **Define the model and input distributions**
@@ -64,3 +67,10 @@
# %%
computed_indices["chatterjee_i"]
+
+# **Plot the Chatterjee indices**
+fig1, ax1 = plot_sensitivity_index(
+ computed_indices["chatterjee_i"][:, 0],
+ plot_title="Chatterjee indices",
+ color="C2",
+)
diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py
index 21803cc16..d7f08dcc5 100644
--- a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py
+++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py
@@ -22,6 +22,9 @@
from UQpy.distributions import Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.Chatterjee import Chatterjee
+from UQpy.sensitivity.PostProcess import *
+
+np.random.seed(123)
# %% [markdown]
# **Define the model and input distributions**
@@ -65,6 +68,14 @@
# %%
computed_indices["confidence_interval_chatterjee_i"]
+# **Plot the Chatterjee indices**
+fig1, ax1 = plot_sensitivity_index(
+ computed_indices["chatterjee_i"][:, 0],
+ computed_indices["confidence_interval_chatterjee_i"],
+ plot_title="Chatterjee indices",
+ color="C2",
+)
+
# %% [markdown]
# **Estimated Sobol indices**
#
@@ -78,3 +89,10 @@
# %%
computed_indices["sobol_i"]
+
+# **Plot the first order Sobol indices**
+fig2, ax2 = plot_sensitivity_index(
+ computed_indices["sobol_i"][:, 0],
+ plot_title="First order Sobol indices",
+ color="C0",
+)
diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
index 439ffaa85..28e42f635 100644
--- a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
+++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
@@ -33,6 +33,9 @@
from UQpy.distributions import Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.Chatterjee import Chatterjee
+from UQpy.sensitivity.PostProcess import *
+
+np.random.seed(123)
# %% [markdown]
# **Define the model and input distributions**
@@ -69,6 +72,13 @@
# %%
computed_indices["chatterjee_i"]
+# **Plot the Chatterjee indices**
+fig1, ax1 = plot_sensitivity_index(
+ computed_indices["chatterjee_i"][:, 0],
+ plot_title="Chatterjee indices",
+ color="C2",
+)
+
# %% [markdown]
# **Estimated Sobol indices**
#
@@ -88,3 +98,10 @@
# %%
computed_indices["sobol_i"]
+
+# **Plot the first order Sobol indices**
+fig2, ax2 = plot_sensitivity_index(
+ computed_indices["sobol_i"][:, 0],
+ plot_title="First order Sobol indices",
+ color="C0",
+)
From 0216941dad7ea51c8fd057c91b99f223a94f613b Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 6 Jun 2022 16:58:45 +0200
Subject: [PATCH 51/59] Added type hints to PostProcess module
---
src/UQpy/sensitivity/PostProcess.py | 45 +++++++++++++++++------------
1 file changed, 27 insertions(+), 18 deletions(-)
diff --git a/src/UQpy/sensitivity/PostProcess.py b/src/UQpy/sensitivity/PostProcess.py
index 77e17bfde..ade3edb39 100644
--- a/src/UQpy/sensitivity/PostProcess.py
+++ b/src/UQpy/sensitivity/PostProcess.py
@@ -8,18 +8,23 @@
"""
-import math
import itertools
import numpy as np
import matplotlib.pyplot as plt
+from beartype import beartype
+from UQpy.utilities.ValidationTypes import (
+ NumpyFloatArray,
+)
+
+@beartype
def plot_sensitivity_index(
- indices,
- confidence_interval=None,
- plot_title=None,
- variable_names=None,
+ indices: NumpyFloatArray,
+ confidence_interval: NumpyFloatArray = None,
+ plot_title: str = None,
+ variable_names: list = None,
**kwargs,
):
@@ -95,15 +100,16 @@ def plot_sensitivity_index(
return fig, ax
+@beartype
def plot_index_comparison(
- indices_1,
- indices_2,
- confidence_interval_1=None,
- confidence_interval_2=None,
- label_1=None,
- label_2=None,
- plot_title="Sensitivity index",
- variable_names=None,
+ indices_1: NumpyFloatArray,
+ indices_2: NumpyFloatArray,
+ confidence_interval_1: NumpyFloatArray = None,
+ confidence_interval_2: NumpyFloatArray = None,
+ label_1: str = None,
+ label_2: str = None,
+ plot_title: str = "Sensitivity index",
+ variable_names: list = None,
**kwargs,
):
@@ -196,6 +202,7 @@ def plot_index_comparison(
yerr=error_1 if conf_int_flag_1 else None,
ecolor="k", # error bar color
capsize=5, # error bar cap size in pt
+ **kwargs,
)
bar_indices_2 = ax.bar(
@@ -208,6 +215,7 @@ def plot_index_comparison(
yerr=error_2 if conf_int_flag_2 else None,
ecolor="k", # error bar color
capsize=5, # error bar cap size in pt
+ **kwargs,
)
ax.bar_label(bar_indices_1, label_type="edge", fontsize=10)
@@ -223,12 +231,13 @@ def plot_index_comparison(
return fig, ax
+@beartype
def plot_second_order_indices(
- indices,
- num_vars,
- confidence_interval=None,
- plot_title="Second order indices",
- variable_names=None,
+ indices: NumpyFloatArray,
+ num_vars: int,
+ confidence_interval: NumpyFloatArray = None,
+ plot_title: str = "Second order indices",
+ variable_names: list = None,
**kwargs,
):
From 624c37325e6f7ff9bf90047562e571f27d183152 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 6 Jun 2022 18:23:07 +0200
Subject: [PATCH 52/59] Fixed minor typo
---
docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py | 2 +-
docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
index 28e42f635..9da7d1cbb 100644
--- a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
+++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
@@ -63,7 +63,7 @@
# %% [markdown]
SA = Chatterjee(runmodel_obj, dist_object)
-# Compute Sobol indices using the pick and freeze algorithm
+# Compute Chatterjee indices using the pick and freeze algorithm
computed_indices = SA.run(n_samples=500_000, estimate_sobol_indices=True)
# %% [markdown]
diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py
index 589166732..6f3f74b93 100644
--- a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py
+++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py
@@ -47,7 +47,7 @@
# %%
SA = cvm(runmodel_obj, dist_object)
-# Compute Sobol indices using the pick and freeze algorithm
+# Compute CVM indices using the pick and freeze algorithm
computed_indices = SA.run(n_samples=20_000, estimate_sobol_indices=True)
# %% [markdown]
From 7da93b1420e584141685a62df3e958199c02b8ee Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 6 Jun 2022 18:24:06 +0200
Subject: [PATCH 53/59] Added a basic comparison of sensitivity indices
---
docs/code/sensitivity/comparison/README.rst | 6 +
.../sensitivity/comparison/local_additive.py | 21 +++
.../sensitivity/comparison/local_ishigami.py | 23 +++
.../sensitivity/comparison/plot_additive.py | 147 ++++++++++++++++
.../sensitivity/comparison/plot_ishigami.py | 165 ++++++++++++++++++
docs/source/conf.py | 2 +
docs/source/sensitivity/index.rst | 7 +
7 files changed, 371 insertions(+)
create mode 100644 docs/code/sensitivity/comparison/README.rst
create mode 100644 docs/code/sensitivity/comparison/local_additive.py
create mode 100644 docs/code/sensitivity/comparison/local_ishigami.py
create mode 100644 docs/code/sensitivity/comparison/plot_additive.py
create mode 100644 docs/code/sensitivity/comparison/plot_ishigami.py
diff --git a/docs/code/sensitivity/comparison/README.rst b/docs/code/sensitivity/comparison/README.rst
new file mode 100644
index 000000000..59f928b8b
--- /dev/null
+++ b/docs/code/sensitivity/comparison/README.rst
@@ -0,0 +1,6 @@
+Comparison of Sensitivity indices
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In this section, we compare the sensitivity indices (Sobol, Cramér-von Mises and Chatterjee) available in the package, using the 'Ishigami function' and the 'Additive model' to illustrate the differences.
+
+In both examples, we note that the Cramér-von Mises indices and the Chatterjee indices are almost equal, since the Chatterjee indices converge to the Cramér-von Mises indices in the large-sample limit.
\ No newline at end of file
diff --git a/docs/code/sensitivity/comparison/local_additive.py b/docs/code/sensitivity/comparison/local_additive.py
new file mode 100644
index 000000000..a0893fa11
--- /dev/null
+++ b/docs/code/sensitivity/comparison/local_additive.py
@@ -0,0 +1,21 @@
+"""
+
+Auxiliary file
+==============================================
+
+"""
+
+import numpy as np
+
+
+def evaluate(X, params) -> np.ndarray:
+ r"""A linear function that is used to demonstrate sensitivity indices.
+
+ .. math::
+ f(x) = a \cdot x_1 + b \cdot x_2
+ """
+ a, b = params
+
+ Y = a * X[:, 0] + b * X[:, 1]
+
+ return Y
diff --git a/docs/code/sensitivity/comparison/local_ishigami.py b/docs/code/sensitivity/comparison/local_ishigami.py
new file mode 100644
index 000000000..e5af649fe
--- /dev/null
+++ b/docs/code/sensitivity/comparison/local_ishigami.py
@@ -0,0 +1,23 @@
+"""
+
+Auxiliary file
+==============================================
+
+"""
+
+import numpy as np
+
+
+def evaluate(X, params=[7, 0.1]):
+ """Non-monotonic Ishigami-Homma three parameter test function"""
+
+ a = params[0]
+ b = params[1]
+
+ Y = (
+ np.sin(X[:, 0])
+ + a * np.power(np.sin(X[:, 1]), 2)
+ + b * np.power(X[:, 2], 4) * np.sin(X[:, 0])
+ )
+
+ return Y
diff --git a/docs/code/sensitivity/comparison/plot_additive.py b/docs/code/sensitivity/comparison/plot_additive.py
new file mode 100644
index 000000000..7c34e28bb
--- /dev/null
+++ b/docs/code/sensitivity/comparison/plot_additive.py
@@ -0,0 +1,147 @@
+"""
+
+Additive function
+==============================================
+
+.. math::
+ f(x) = a \cdot X_1 + b \cdot X_2, \quad X_1, X_2 \sim \mathcal{N}(0, 1), \quad a,b \in \mathbb{R}
+
+"""
+
+# %%
+import numpy as np
+import matplotlib.pyplot as plt
+
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Normal
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.Chatterjee import Chatterjee
+from UQpy.sensitivity.CramervonMises import CramervonMises as cvm
+from UQpy.sensitivity.Sobol import Sobol
+from UQpy.sensitivity.PostProcess import *
+
+np.random.seed(123)
+
+# %% [markdown]
+# **Define the model and input distributions**
+
+# Create Model object
+a, b = 1, 2
+
+model = PythonModel(
+ model_script="local_additive.py",
+ model_object_name="evaluate",
+ var_names=[
+ "X_1",
+ "X_2",
+ ],
+ delete_files=True,
+ params=[a, b],
+)
+
+runmodel_obj = RunModel(model=model)
+
+# Define distribution object
+dist_object = JointIndependent([Normal(0, 1)] * 2)
+
+# %% [markdown]
+# **Compute Sobol indices**
+
+# %% [markdown]
+SA_sobol = Sobol(runmodel_obj, dist_object)
+
+computed_indices_sobol = SA_sobol.run(n_samples=50_000)
+
+# %% [markdown]
+# **First order Sobol indices**
+#
+# Expected first order Sobol indices:
+#
+# :math:`\mathrm{S}_1 = \frac{a^2 \cdot \mathbb{V}[X_1]}{a^2 \cdot \mathbb{V}[X_1] + b^2 \cdot \mathbb{V}[X_2]} = \frac{1^2 \cdot 1}{1^2 \cdot 1 + 2^2 \cdot 1} = 0.2`
+#
+# :math:`\mathrm{S}_2 = \frac{b^2 \cdot \mathbb{V}[X_2]}{a^2 \cdot \mathbb{V}[X_1] + b^2 \cdot \mathbb{V}[X_2]} = \frac{2^2 \cdot 1}{1^2 \cdot 1 + 2^2 \cdot 1} = 0.8`
+
+# %%
+computed_indices_sobol["sobol_i"]
+
+# %% [markdown]
+# **Compute Chatterjee indices**
+
+# %% [markdown]
+SA_chatterjee = Chatterjee(runmodel_obj, dist_object)
+
+computed_indices_chatterjee = SA_chatterjee.run(n_samples=50_000)
+
+# %%
+computed_indices_chatterjee["chatterjee_i"]
+
+# %%
+SA_cvm = cvm(runmodel_obj, dist_object)
+
+# Compute CVM indices using the pick and freeze algorithm
+computed_indices_cvm = SA_cvm.run(n_samples=20_000, estimate_sobol_indices=True)
+
+# %%
+computed_indices_cvm["CVM_i"]
+
+# %%
+# **Plot all indices**
+
+num_vars = 2
+_idx = np.arange(num_vars)
+variable_names = [r"$X_{}$".format(i + 1) for i in range(num_vars)]
+
+# round to 2 decimal places
+indices_1 = np.around(computed_indices_sobol["sobol_i"][:, 0], decimals=2)
+indices_2 = np.around(computed_indices_chatterjee["chatterjee_i"][:, 0], decimals=2)
+indices_3 = np.around(computed_indices_cvm["CVM_i"][:, 0], decimals=2)
+
+fig, ax = plt.subplots()
+width = 0.3
+ax.spines["top"].set_visible(False)
+ax.spines["right"].set_visible(False)
+
+bar_indices_1 = ax.bar(
+ _idx - width, # x-axis
+ indices_1, # y-axis
+ width=width, # bar width
+ color="C0", # bar color
+ # alpha=0.5, # bar transparency
+ label="Sobol", # bar label
+ ecolor="k", # error bar color
+ capsize=5, # error bar cap size in pt
+)
+
+bar_indices_2 = ax.bar(
+ _idx, # x-axis
+ indices_2, # y-axis
+ width=width, # bar width
+ color="C2", # bar color
+ # alpha=0.5, # bar transparency
+ label="Chatterjee", # bar label
+ ecolor="k", # error bar color
+ capsize=5, # error bar cap size in pt
+)
+
+bar_indices_3 = ax.bar(
+ _idx + width, # x-axis
+ indices_3, # y-axis
+ width=width, # bar width
+ color="C3", # bar color
+ # alpha=0.5, # bar transparency
+ label="Cramér-von Mises", # bar label
+ ecolor="k", # error bar color
+ capsize=5, # error bar cap size in pt
+)
+
+ax.bar_label(bar_indices_1, label_type="edge", fontsize=10)
+ax.bar_label(bar_indices_2, label_type="edge", fontsize=10)
+ax.bar_label(bar_indices_3, label_type="edge", fontsize=10)
+ax.set_xticks(_idx, variable_names)
+ax.set_xlabel("Model inputs")
+ax.set_title("Comparison of sensitivity indices")
+ax.set_ylim(top=1) # set only upper limit of y to 1
+ax.legend()
+
+plt.show()
diff --git a/docs/code/sensitivity/comparison/plot_ishigami.py b/docs/code/sensitivity/comparison/plot_ishigami.py
new file mode 100644
index 000000000..116245734
--- /dev/null
+++ b/docs/code/sensitivity/comparison/plot_ishigami.py
@@ -0,0 +1,165 @@
+r"""
+
+Ishigami function
+==============================================
+
+The Ishigami function is a non-linear, non-monotonic function that is commonly used to
+benchmark uncertainty and sensitivity analysis methods.
+
+.. math::
+    f(x_1, x_2, x_3) = \sin(x_1) + a \cdot \sin^2(x_2) + b \cdot x_3^4 \sin(x_1)
+
+.. math::
+ x_1, x_2, x_3 \sim \mathcal{U}(-\pi, \pi), \quad a, b\in \mathbb{R}
+
+"""
+
+# %%
+import numpy as np
+import matplotlib.pyplot as plt
+
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Uniform
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.Chatterjee import Chatterjee
+from UQpy.sensitivity.CramervonMises import CramervonMises as cvm
+from UQpy.sensitivity.Sobol import Sobol
+from UQpy.sensitivity.PostProcess import *
+
+np.random.seed(123)
+
+# %% [markdown]
+# **Define the model and input distributions**
+
+# %%
+# Create Model object
+model = PythonModel(
+ model_script="local_ishigami.py",
+ model_object_name="evaluate",
+ var_names=[r"$X_1$", "$X_2$", "$X_3$"],
+ delete_files=True,
+ params=[7, 0.1],
+)
+
+runmodel_obj = RunModel(model=model)
+
+# Define distribution object
+dist_object = JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3)
+
+# %% [markdown]
+# **Compute Sobol indices**
+
+# %%
+SA_sobol = Sobol(runmodel_obj, dist_object)
+
+computed_indices_sobol = SA_sobol.run(n_samples=100_000)
+
+# %% [markdown]
+# **First order Sobol indices**
+#
+# Expected first order Sobol indices:
+#
+# :math:`S_1` = 0.3139
+#
+# :math:`S_2` = 0.4424
+#
+# :math:`S_3` = 0.0
+
+# %%
+computed_indices_sobol["sobol_i"]
+
+# %% [markdown]
+# **Total order Sobol indices**
+#
+# Expected total order Sobol indices:
+#
+# :math:`S_{T_1}` = 0.55758886
+#
+# :math:`S_{T_2}` = 0.44241114
+#
+# :math:`S_{T_3}` = 0.24368366
+
+# %%
+computed_indices_sobol["sobol_total_i"]
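
# %%
# The expected values above can be cross-checked against the standard closed-form
# variance decomposition of the Ishigami function (a quick sketch assuming
# :math:`X_i \sim \mathcal{U}(-\pi, \pi)` with a = 7, b = 0.1; not part of UQpy):

a, b = 7, 0.1
V1 = 0.5 * (1 + b * np.pi**4 / 5) ** 2  # V[E(Y|X_1)]
V2 = a**2 / 8  # V[E(Y|X_2)]
V13 = 8 * b**2 * np.pi**8 / 225  # interaction variance of X_1 and X_3
V = V1 + V2 + V13  # total variance (V_3 = 0)

print("S_1, S_2, S_3   :", V1 / V, V2 / V, 0.0)
print("S_T1, S_T2, S_T3:", (V1 + V13) / V, V2 / V, V13 / V)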
+
+# %% [markdown]
+# **Compute Chatterjee indices**
+
+# %% [markdown]
+SA_chatterjee = Chatterjee(runmodel_obj, dist_object)
+
+computed_indices_chatterjee = SA_chatterjee.run(n_samples=50_000)
+
+# %%
+computed_indices_chatterjee["chatterjee_i"]
+
+# %% [markdown]
+# **Compute Cramér-von Mises indices**
+SA_cvm = cvm(runmodel_obj, dist_object)
+
+# Compute CVM indices using the pick and freeze algorithm
+computed_indices_cvm = SA_cvm.run(n_samples=20_000, estimate_sobol_indices=True)
+
+# %%
+computed_indices_cvm["CVM_i"]
+
+# %%
+# **Plot all indices**
+
+num_vars = 3
+_idx = np.arange(num_vars)
+variable_names = [r"$X_{}$".format(i + 1) for i in range(num_vars)]
+
+# round to 2 decimal places
+indices_1 = np.around(computed_indices_sobol["sobol_i"][:, 0], decimals=2)
+indices_2 = np.around(computed_indices_chatterjee["chatterjee_i"][:, 0], decimals=2)
+indices_3 = np.around(computed_indices_cvm["CVM_i"][:, 0], decimals=2)
+
+fig, ax = plt.subplots()
+width = 0.3
+ax.spines["top"].set_visible(False)
+ax.spines["right"].set_visible(False)
+
+bar_indices_1 = ax.bar(
+ _idx - width, # x-axis
+ indices_1, # y-axis
+ width=width, # bar width
+ color="C0", # bar color
+ # alpha=0.5, # bar transparency
+ label="Sobol", # bar label
+ ecolor="k", # error bar color
+ capsize=5, # error bar cap size in pt
+)
+
+bar_indices_2 = ax.bar(
+ _idx, # x-axis
+ indices_2, # y-axis
+ width=width, # bar width
+ color="C2", # bar color
+ # alpha=0.5, # bar transparency
+ label="Chatterjee", # bar label
+ ecolor="k", # error bar color
+ capsize=5, # error bar cap size in pt
+)
+
+bar_indices_3 = ax.bar(
+ _idx + width, # x-axis
+ indices_3, # y-axis
+ width=width, # bar width
+ color="C3", # bar color
+ # alpha=0.5, # bar transparency
+ label="Cramér-von Mises", # bar label
+ ecolor="k", # error bar color
+ capsize=5, # error bar cap size in pt
+)
+
+ax.bar_label(bar_indices_1, label_type="edge", fontsize=10)
+ax.bar_label(bar_indices_2, label_type="edge", fontsize=10)
+ax.bar_label(bar_indices_3, label_type="edge", fontsize=10)
+ax.set_xticks(_idx, variable_names)
+ax.set_xlabel("Model inputs")
+ax.set_title("Comparison of sensitivity indices")
+ax.set_ylim(top=1) # set only upper limit of y to 1
+ax.legend()
+
+plt.show()
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 68538001d..3c7c10316 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -95,6 +95,7 @@
"../code/sensitivity/cramer_von_mises",
"../code/sensitivity/chatterjee",
"../code/sensitivity/generalised_sobol",
+ "../code/sensitivity/comparison",
"../code/stochastic_processes/bispectral",
"../code/stochastic_processes/karhunen_loeve",
"../code/stochastic_processes/spectral",
@@ -133,6 +134,7 @@
"auto_examples/sensitivity/cramer_von_mises",
"auto_examples/sensitivity/chatterjee",
"auto_examples/sensitivity/generalised_sobol",
+ "auto_examples/sensitivity/comparison",
"auto_examples/stochastic_processes/bispectral",
"auto_examples/stochastic_processes/karhunen_loeve",
"auto_examples/stochastic_processes/spectral",
diff --git a/docs/source/sensitivity/index.rst b/docs/source/sensitivity/index.rst
index 1b2a8367d..161cfd3b2 100644
--- a/docs/source/sensitivity/index.rst
+++ b/docs/source/sensitivity/index.rst
@@ -26,3 +26,10 @@ Sensitivity analysis comprises techniques focused on determining how the variati
Morris Sensitivity
Polynomial Chaos Sensitivity
Sobol Sensitivity
+
+Examples
+""""""""""
+
+.. toctree::
+
+ Comparison of indices <../auto_examples/sensitivity/comparison/index>
From 08d9180144873404704e3f9f2d13990fdaeb4428 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 3 Jul 2022 17:11:49 +0200
Subject: [PATCH 54/59] Minor fix
---
docs/code/sensitivity/chatterjee/local_sobol_func.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/code/sensitivity/chatterjee/local_sobol_func.py b/docs/code/sensitivity/chatterjee/local_sobol_func.py
index 1ccabc6dd..dea2e6714 100644
--- a/docs/code/sensitivity/chatterjee/local_sobol_func.py
+++ b/docs/code/sensitivity/chatterjee/local_sobol_func.py
@@ -27,7 +27,7 @@ def sensitivities(a_values):
Total_order = np.zeros((dims, 1))
- V_i = (3 * (1 + a_values) ** 2) ** (-1)
+ V_i = 1 / (3 * (1 + a_values) ** 2)
total_variance = np.prod(1 + V_i) - 1
From dfa17e9390bffd59958716b29a03f1edebbdc2b8 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 3 Jul 2022 17:12:38 +0200
Subject: [PATCH 55/59] Added convergence study
---
.../chatterjee/plot_chatterjee_sobol_func.py | 81 ++++++++++++++++---
1 file changed, 72 insertions(+), 9 deletions(-)
diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
index 9da7d1cbb..5659f6d53 100644
--- a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
+++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
@@ -21,7 +21,10 @@
.. math::
x_i \sim \mathcal{U}(0, 1), \quad a_i \in \mathbb{R}.
-.. [1] Fabrice Gamboa, Pierre Gremaud, Thierry Klein, and Agnès Lagnoux. (2020). Global Sensitivity Analysis: a new generation of mighty estimators based on rank statistics.
+Finally, we also compare the convergence rate of the Pick and Freeze approach with the
+rank statistics approach as in [1]_.
+
+.. [1] Fabrice Gamboa, Pierre Gremaud, Thierry Klein, and Agnès Lagnoux. (2020). Global Sensitivity Analysis: a new generation of mighty estimators based on rank statistics. (`Link `_)
"""
@@ -33,6 +36,7 @@
from UQpy.distributions import Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.Chatterjee import Chatterjee
+from UQpy.sensitivity.Sobol import Sobol
from UQpy.sensitivity.PostProcess import *
np.random.seed(123)
@@ -42,7 +46,7 @@
# Create Model object
num_vars = 6
-a_vals = np.array([0.0, 0.5, 3.0, 9.0, 99.0, 99.0])
+a_vals = np.arange(1, num_vars+1, 1)
model = PythonModel(
model_script="local_sobol_func.py",
@@ -63,7 +67,7 @@
# %% [markdown]
SA = Chatterjee(runmodel_obj, dist_object)
-# Compute Chatterjee indices using the pick and freeze algorithm
+# Compute Chatterjee indices using rank statistics
computed_indices = SA.run(n_samples=500_000, estimate_sobol_indices=True)
# %% [markdown]
@@ -84,17 +88,17 @@
#
# Expected first order Sobol indices:
#
-# :math:`S_1` = 5.86781190e-01
+# :math:`S_1` = 0.46067666
#
-# :math:`S_2` = 2.60791640e-01
+# :math:`S_2` = 0.20474518
#
-# :math:`S_3` = 3.66738244e-02
+# :math:`S_3` = 0.11516917
#
-# :math:`S_4` = 5.86781190e-03
+# :math:`S_4` = 0.07370827
#
-# :math:`S_5` = 5.86781190e-05
+# :math:`S_5` = 0.0511863
#
-# :math:`S_6` = 5.86781190e-05
+# :math:`S_6` = 0.03760626
# %%
computed_indices["sobol_i"]
@@ -105,3 +109,62 @@
plot_title="First order Sobol indices",
color="C0",
)
+
+# %% [markdown]
+# **Comparing convergence rate of rank statistics and the Pick and Freeze approach**
+#
+# For the Pick-and-Freeze estimates, several sample sizes N are considered:
+# N = 100, 500, 1000, 5000, 10000, 50000, and 100000.
+# Since the Pick-and-Freeze procedure requires (p + 1) sets of N samples, the sample
+# sizes used for the rank-statistics estimates are n = (p + 1)N = 7N, so that both
+# methods use the same number of model evaluations.
+# We observe that both methods converge and give precise estimates for large sample sizes.
+
+# %%
+
+# Compute indices values for equal number of model evaluations
+
+true_values = np.array([0.46067666,
+ 0.20474518,
+ 0.11516917,
+ 0.07370827,
+ 0.0511863 ,
+ 0.03760626])
+
+sample_sizes = [100, 500, 1_000, 5_000, 10_000, 50_000, 100_000]
+num_studies = len(sample_sizes)
+
+store_pick_freeze = np.zeros((num_vars, num_studies))
+store_rank_stats = np.zeros((num_vars, num_studies))
+
+SA_chatterjee = Chatterjee(runmodel_obj, dist_object)
+SA_sobol = Sobol(runmodel_obj, dist_object)
+
+for i, sample_size in enumerate(sample_sizes):
+
+ # Estimate using rank statistics
+ _indices = SA_chatterjee.run(n_samples=sample_size*7, estimate_sobol_indices=True)
+ store_rank_stats[:, i] = _indices["sobol_i"].ravel()
+
+ # Estimate using Pick and Freeze approach
+ _indices = SA_sobol.run(n_samples=sample_size)
+ store_pick_freeze[:, i] = _indices["sobol_i"].ravel()
+
+# %%
+
+## Convergence plot
+
+fig, ax = plt.subplots(2, 3, figsize=(30, 15))
+
+for k in range(num_vars):
+
+ i, j = divmod(k, 3) # (built-in) divmod(a, b) returns a tuple (a // b, a % b)
+
+ ax[i][j].semilogx(sample_sizes, store_rank_stats[k, :], 'ro-', label='Chatterjee estimate')
+ ax[i][j].semilogx(sample_sizes, store_pick_freeze[k, :], 'bx-', label='Pick and Freeze estimate')
+    ax[i][j].hlines(true_values[k], sample_sizes[0], sample_sizes[-1], 'k', label='True index')
+    ax[i][j].set_title(r'$S_' + str(k+1) + '$ = ' + str(np.round(true_values[k], 4)))
+
+plt.suptitle('Comparing convergence of the Chatterjee estimate and the Pick and Freeze approach')
+plt.legend()
+plt.show()
From 786c6e58b3ec37913ea2a9b6f84e2a149c86b44b Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 3 Jul 2022 17:13:09 +0200
Subject: [PATCH 56/59] Changed a_values in Sobol func
---
.../cramer_von_mises/plot_cvm_sobol_func.py | 21 ++++++++++++-------
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py
index 443073593..8624faec7 100644
--- a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py
+++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py
@@ -16,6 +16,11 @@
.. math::
x_i \sim \mathcal{U}(0, 1), \quad a_i \in \mathbb{R}.
+
+The same function is also used in the Chatterjee indices section to demonstrate the
+computation of the Chatterjee indices; the Sobol index estimates obtained there closely
+agree with the estimates computed here.
+
"""
# %%
@@ -35,7 +40,7 @@
# Create Model object
num_vars = 6
-a_vals = np.array([0.0, 0.5, 3.0, 9.0, 99.0, 99.0])
+a_vals = np.arange(1, num_vars+1, 1)
model = PythonModel(
model_script="local_sobol_func.py",
@@ -56,7 +61,7 @@
# %%
SA = cvm(runmodel_obj, dist_object)
-# Compute Sobol indices using the pick and freeze algorithm
+# Compute the Cramér-von Mises indices and estimate the Sobol indices using the pick and freeze algorithm
computed_indices = SA.run(n_samples=50_000, estimate_sobol_indices=True)
# %% [markdown]
@@ -77,17 +82,17 @@
#
# Expected first order Sobol indices:
#
-# :math:`S_1` = 5.86781190e-01
+# :math:`S_1` = 0.46067666
#
-# :math:`S_2` = 2.60791640e-01
+# :math:`S_2` = 0.20474518
#
-# :math:`S_3` = 3.66738244e-02
+# :math:`S_3` = 0.11516917
#
-# :math:`S_4` = 5.86781190e-03
+# :math:`S_4` = 0.07370827
#
-# :math:`S_5` = 5.86781190e-05
+# :math:`S_5` = 0.0511863
#
-# :math:`S_6` = 5.86781190e-05
+# :math:`S_6` = 0.03760626
# %%
computed_indices["sobol_i"]
From af5e2a57643d0fed0317a997daa41eee1330c167 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 3 Jul 2022 17:14:42 +0200
Subject: [PATCH 57/59] Minor fixes to docstring
---
.../plot_generalised_sobol_mechanical_oscillator_ODE.py | 5 ++++-
.../generalised_sobol/plot_generalised_sobol_multioutput.py | 5 +++++
.../code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py | 5 ++++-
docs/code/sensitivity/sobol/plot_sobol_additive.py | 5 +++++
docs/code/sensitivity/sobol/plot_sobol_func.py | 5 +++++
src/UQpy/sensitivity/Chatterjee.py | 4 ++--
6 files changed, 25 insertions(+), 4 deletions(-)
diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py
index dabc3dfe9..104703c76 100644
--- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py
+++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py
@@ -3,7 +3,8 @@
Mechanical oscillator model (multioutput)
==============================================
-The mechanical oscillator is governed by the following second-order ODE:
+In this example, we consider a mechanical oscillator governed by the following
+second-order ODE, as demonstrated in [1]_:
.. math::
m \ddot{x} + c \dot{x} + k x = 0
@@ -20,6 +21,8 @@
parameters at each point in time, the GSI indices summarise the sensitivities of the
model parameters over the entire time period.
+.. [1] Gamboa, F., Janon, A., Klein, T., & Lagnoux, A. (2014). Sensitivity analysis for multidimensional and functional outputs. Electronic Journal of Statistics, 8(1), 575-603.
+
"""
# %%
diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py
index 0a2a7529c..7cdd78024 100644
--- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py
+++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py
@@ -3,6 +3,9 @@
Toy multioutput function
==============================================
+In this example, we demonstrate the computation of the Generalised Sobol indices using
+the toy example in [1]_.
+
.. math::
Y = f (X_{1}, X_{2}) := \left(\begin{array}{c}
X_{1}+X_{2}+X_{1} X_{2} \\
@@ -15,6 +18,8 @@
.. math::
\text{case 2: } X_1, X_2 \sim \mathcal{U}(0, 1)
+.. [1] Gamboa, F., Janon, A., Klein, T., & Lagnoux, A. (2014). Sensitivity analysis for multidimensional and functional outputs. Electronic Journal of Statistics, 8(1), 575-603.
+
"""
# %%
diff --git a/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py b/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py
index 06d1a66b1..b15e65006 100644
--- a/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py
+++ b/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py
@@ -3,7 +3,8 @@
Mechanical oscillator model (multioutput)
==============================================
-The mechanical oscillator is governed by the following second-order ODE:
+In this example, we consider a mechanical oscillator governed by the following
+second-order ODE, as demonstrated in [1]_:
.. math::
m \ddot{x} + c \dot{x} + k x = 0
@@ -20,6 +21,8 @@
pointwise-in-time Sobol indices. These indices describe the sensitivity of the model
parameters at each point in time.
+.. [1] Gamboa, F., Janon, A., Klein, T., & Lagnoux, A. (2014). Sensitivity analysis for multidimensional and functional outputs. Electronic Journal of Statistics, 8(1), 575-603.
+
"""
# %%
diff --git a/docs/code/sensitivity/sobol/plot_sobol_additive.py b/docs/code/sensitivity/sobol/plot_sobol_additive.py
index 51ee993c0..0cb860d48 100644
--- a/docs/code/sensitivity/sobol/plot_sobol_additive.py
+++ b/docs/code/sensitivity/sobol/plot_sobol_additive.py
@@ -3,9 +3,14 @@
Additive function
==============================================
+We introduce the variance-based Sobol indices using an elementary example.
+For more details, refer to [1]_.
+
.. math::
f(x) = a \cdot X_1 + b \cdot X_2, \quad X_1, X_2 \sim \mathcal{N}(0, 1), \quad a,b \in \mathbb{R}
+.. [1] Saltelli, A. (2008). Global sensitivity analysis: The primer. John Wiley. ISBN: 9780470059975
+
"""
# %%
diff --git a/docs/code/sensitivity/sobol/plot_sobol_func.py b/docs/code/sensitivity/sobol/plot_sobol_func.py
index 7c6058831..505a23c04 100644
--- a/docs/code/sensitivity/sobol/plot_sobol_func.py
+++ b/docs/code/sensitivity/sobol/plot_sobol_func.py
@@ -16,6 +16,11 @@
.. math::
x_i \sim \mathcal{U}(0, 1), \quad a_i \in \mathbb{R}.
+This is an example from [1]_, where the first order, total order and second order
+indices are computed.
+
+.. [1] Glen, G., & Isaacs, K. (2012). Estimating Sobol sensitivity indices using correlations. Environmental Modelling and Software, 37, 157–166.
+
"""
# %%
diff --git a/src/UQpy/sensitivity/Chatterjee.py b/src/UQpy/sensitivity/Chatterjee.py
index 53c470b5b..f56b5cbc9 100644
--- a/src/UQpy/sensitivity/Chatterjee.py
+++ b/src/UQpy/sensitivity/Chatterjee.py
@@ -101,8 +101,8 @@ def run(
"""
Compute the sensitivity indices using the Chatterjee method.
- :param n_samples: Number of samples used to compute the Cramér-von Mises indices. \
- Default is 1,000.
+ :param n_samples: Number of samples used to compute the Chatterjee indices. \
+ Default is 1,000.
:param estimate_sobol_indices: If :code:`True`, the Sobol indices are estimated \
using the pick-and-freeze samples.
From ee1d892118a4a594a99519afd071497b4275f46c Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 3 Jul 2022 17:15:06 +0200
Subject: [PATCH 58/59] Minor changes in documentation
---
docs/source/sensitivity/chatterjee.rst | 3 ++-
docs/source/sensitivity/sobol.rst | 2 +-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/docs/source/sensitivity/chatterjee.rst b/docs/source/sensitivity/chatterjee.rst
index 57921b4b2..42ee716e8 100644
--- a/docs/source/sensitivity/chatterjee.rst
+++ b/docs/source/sensitivity/chatterjee.rst
@@ -9,8 +9,9 @@ Consider :math:`n` samples of random variables :math:`X` and :math:`Y`, with :ma
\xi_{n}(X, Y):=1-\frac{3 \sum_{i=1}^{n-1}\left|r_{i+1}-r_{i}\right|}{n^{2}-1}
-The Chatterjee index converges for :math:`n \rightarrow \infty` to the Cramér-von Mises index and is faster to estimate than using the Pick and Freeze approach in the Cramér-von Mises index.
+The Chatterjee index converges for :math:`n \rightarrow \infty` to the Cramér-von Mises index and is faster to estimate than the Pick and Freeze approach used to compute the Cramér-von Mises index.
+Furthermore, the Sobol indices can be efficiently estimated by leveraging the same rank statistics, which has the advantage that any sample can be used and no specific pick and freeze scheme is required.
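
For reference, the estimator :math:`\xi_{n}` above can be written in a few lines of NumPy for the case without ties (a standalone sketch, not the UQpy implementation)::

    import numpy as np

    def chatterjee_xi(x, y):
        """Chatterjee's rank statistic xi_n(X, Y), assuming no ties in y."""
        n = len(x)
        y_sorted = y[np.argsort(x)]                   # re-order Y by increasing X
        ranks = np.argsort(np.argsort(y_sorted)) + 1  # r_i = rank of Y_(i)
        return 1 - 3 * np.sum(np.abs(np.diff(ranks))) / (n**2 - 1)
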
Chatterjee Class
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/docs/source/sensitivity/sobol.rst b/docs/source/sensitivity/sobol.rst
index 60469b28c..4b482e81f 100644
--- a/docs/source/sensitivity/sobol.rst
+++ b/docs/source/sensitivity/sobol.rst
@@ -17,7 +17,7 @@ If the first order index of an input parameter is equal to the total order index
The Sobol indices are typically computed using the Pick-and-Freeze approach for single output and multi-output models. Since there are several variants of the Pick-and-Freeze approach, the schemes implemented to compute Sobol indices are listed below:
-Here, :math:`N` is the number of Monte Carlo samples and :math:`m` being the number of input parameters in the model.
+Here, :math:`N` is the Monte Carlo sample size and :math:`m` is the number of input parameters in the model.
1. **First order indices** (:math:`S_{i}`)
From e57f82dcdb2089869e7d942670733577378344d7 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 3 Jul 2022 17:32:12 +0200
Subject: [PATCH 59/59] Described why SA outputs are different
---
docs/code/sensitivity/comparison/plot_additive.py | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/docs/code/sensitivity/comparison/plot_additive.py b/docs/code/sensitivity/comparison/plot_additive.py
index 7c34e28bb..623ffd641 100644
--- a/docs/code/sensitivity/comparison/plot_additive.py
+++ b/docs/code/sensitivity/comparison/plot_additive.py
@@ -3,9 +3,22 @@
Additive function
==============================================
+We use an elementary example to intuitively convey the sensitivities according to
+different metrics.
+
.. math::
f(x) = a \cdot X_1 + b \cdot X_2, \quad X_1, X_2 \sim \mathcal{N}(0, 1), \quad a,b \in \mathbb{R}
+In the plot below, we note that the indices assign different sensitivities to the two
+inputs. The variance-based Sobol indices use the variance as the metric to quantify
+sensitivity, whereas the Chatterjee and Cramér-von Mises indices use the entire
+probability distribution to quantify sensitivity. In general, such moment-free indices
+provide a more holistic measure of sensitivity than the variance-based indices, which
+are accurate mainly when the output distribution is close to a Gaussian (see [1]_ for a
+motivating example).
+
+.. [1] Borgonovo, E. (2006). Measuring uncertainty importance: Investigation and comparison of alternative approaches. Risk Analysis, 26(5), 1349-1361.
+
"""
# %%
@@ -145,3 +158,4 @@
ax.legend()
plt.show()
+