From a0ddbede089ce5fee1bc152640496c66f5949e9c Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sat, 7 May 2022 16:03:17 +0200
Subject: [PATCH 01/88] Added sobol sensitivity

---
 src/UQpy/sensitivity/baseclass/__init__.py    |   0
 src/UQpy/sensitivity/baseclass/pickfreeze.py  |  64 ++
 src/UQpy/sensitivity/baseclass/sensitivity.py | 318 ++
 src/UQpy/sensitivity/sobol.py                 | 933 ++++++++++++++++++
 4 files changed, 1315 insertions(+)
 create mode 100644 src/UQpy/sensitivity/baseclass/__init__.py
 create mode 100644 src/UQpy/sensitivity/baseclass/pickfreeze.py
 create mode 100644 src/UQpy/sensitivity/baseclass/sensitivity.py
 create mode 100644 src/UQpy/sensitivity/sobol.py

diff --git a/src/UQpy/sensitivity/baseclass/__init__.py b/src/UQpy/sensitivity/baseclass/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/UQpy/sensitivity/baseclass/pickfreeze.py b/src/UQpy/sensitivity/baseclass/pickfreeze.py
new file mode 100644
index 000000000..4e9e2f57e
--- /dev/null
+++ b/src/UQpy/sensitivity/baseclass/pickfreeze.py
@@ -0,0 +1,64 @@
+import copy
+
+
+def generate_pick_freeze_samples(dist_obj, n_samples, random_state=None):
+
+    """
+    Generate samples to be used in the Pick-and-Freeze algorithm.
+
+    **Outputs:**
+
+    * **A_samples** (`ndarray`):
+        Sample set A.
+        Shape: `(n_samples, num_vars)`.
+
+    * **B_samples** (`ndarray`):
+        Sample set B.
+        Shape: `(n_samples, num_vars)`.
+
+    * **C_i_generator** (`generator`):
+        Generator for the sample set C_i.
+        A generator is used so that the samples
+        do not have to be stored in memory.
+        C_i is a 2D array with all columns
+        from B_samples, except column `i`,
+        which is from A_samples.
+        Shape: `(n_samples, num_vars)`.
+
+    * **D_i_generator** (`generator`):
+        Generator for the sample set D_i.
+        A generator is used so that the samples
+        do not have to be stored in memory.
+        D_i is a 2D array with all columns
+        from A_samples, except column `i`,
+        which is from B_samples.
+        Shape: `(n_samples, num_vars)`.
+
+    """
+
+    # Generate samples for A and B
+    samples = dist_obj.rvs(n_samples * 2, random_state=random_state)
+
+    num_vars = samples.shape[1]
+
+    # Split samples into two sets A and B
+    A_samples = samples[:n_samples, :]
+    B_samples = samples[n_samples:, :]
+
+    # Iterator for generating C_i
+    def C_i_generator():
+        """Generate C_i for each i."""
+        for i in range(num_vars):
+            C_i = copy.deepcopy(B_samples)  #! Deepcopy so B is unchanged
+            C_i[:, i] = A_samples[:, i]
+            yield C_i
+
+    # Iterator for generating D_i
+    def D_i_generator():
+        """Generate D_i for each i."""
+        for i in range(num_vars):
+            D_i = copy.deepcopy(A_samples)  #! Deepcopy so A is unchanged
+            D_i[:, i] = B_samples[:, i]
+            yield D_i
+
+    return A_samples, B_samples, C_i_generator(), D_i_generator()
diff --git a/src/UQpy/sensitivity/baseclass/sensitivity.py b/src/UQpy/sensitivity/baseclass/sensitivity.py
new file mode 100644
index 000000000..af2adc594
--- /dev/null
+++ b/src/UQpy/sensitivity/baseclass/sensitivity.py
@@ -0,0 +1,318 @@
+"""
+
+This module contains the abstract Sensitivity class used by other
+sensitivity classes:
+1. Chatterjee indices
+2. Cramer-von Mises indices
+3. Generalised Sobol indices
+4.
Sobol indices + +""" + +import copy +import numpy as np +import scipy.stats + +from UQpy.run_model import RunModel +from UQpy.distributions.baseclass import DistributionContinuous1D +from UQpy.distributions.collection import JointIndependent + + +class Sensitivity: + def __init__( + self, runmodel_object, dist_object, random_state=None, **kwargs + ) -> None: + + # Check RunModel object + if not isinstance(runmodel_object, RunModel): + raise TypeError("UQpy: runmodel_object must be an object of class RunModel") + + self.runmodel_object = runmodel_object + + # Check distributions + if isinstance(dist_object, list): + for i in range(len(dist_object)): + if not isinstance(dist_object[i], (DistributionContinuous1D, JointIndependent)): + raise TypeError( + "UQpy: A ``DistributionContinuous1D`` or ``JointInd`` object " + "must be provided." + ) + else: + if not isinstance(dist_object, (DistributionContinuous1D, JointIndependent)): + raise TypeError( + "UQpy: A ``DistributionContinuous1D`` or ``JointInd`` object must be provided." + ) + + self.dist_object = dist_object + + # Check random state + self.random_state = random_state + if isinstance(self.random_state, int): + self.random_state = np.random.RandomState(self.random_state) + elif not ( + self.random_state is None + or isinstance(self.random_state, np.random.RandomState) + ): + raise TypeError( + "UQpy: random state should be None, an integer or np.random.RandomState object" + ) + + # wrapper created for convenience to generate model evaluations + def _run_model(self, samples): + """Generate model evaluations for a set of samples. + + **Inputs**: + + * **samples** (`numpy.ndarray`): + A set of samples. + Shape: `(n_samples, num_vars)` + + **Outputs**: + + * **model_evaluations** (`numpy.ndarray`): + A set of model evaluations. + Shape: `(n_samples,)` + + if multioutput: `(n_samples, n_outputs)` + + """ + + self.runmodel_object.run(samples=samples, append_samples=False) + model_evals = copy.deepcopy(np.array(self.runmodel_object.qoi_list)) + + return model_evals + + @staticmethod + def bootstrap_sample_generator_1D(samples): + """Generate bootstrap samples. + + Generators are used to avoid copying the entire array. + + It will simply pick `N` random rows from the array. + + For example: + Model evaluations for the samples in A in the pick and freeze estimator. + + **Inputs:** + + * **samples** (`ndarray`): + Model evaluations for the samples. + Shape: `(n_samples, 1)`. + + **Outputs:** + + * `generator`: + Generator for the bootstrap samples. + + """ + n_samples = samples.shape[0] + + while True: + _indices = np.random.randint(0, high=n_samples, size=n_samples) + + yield samples[_indices] + + @staticmethod + def bootstrap_sample_generator_2D(samples): + """Generate bootstrap samples. + + Generators are used to avoid copying the entire array. + + For example: + Let's say we have '3' random variables + To pick bootstrap samples from f_C_i, we first + generate indices to pick values from each column + num_cols = 3 + cols = [0, 1, 2] + _indices = [[3, 4, 8] + [6, 1, 2] + [0, 5, 7] + [4, 1, 0]] (4x3) + elements from f_C_i will be picked column-wise: + f_C_i[_indices[:, 0], 0] + f_C_i[_indices[:, 1], 1] etc. + + **Inputs:** + + * **samples** (`ndarray`): + Model evaluations for the samples. + Shape: `(n_samples, 1)`. + + **Outputs:** + + * `generator`: + Generator for the bootstrap samples. 
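+
+        A minimal usage sketch (illustrative only; the array values and
+        names here are hypothetical):
+
+            >>> f_C_i = np.arange(12.0).reshape(4, 3)
+            >>> gen = Sensitivity.bootstrap_sample_generator_2D(f_C_i)
+            >>> resample = next(gen)  # shape (4, 3); each column is resampled independently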
+ + """ + n_samples = samples.shape[0] + + num_cols = samples.shape[1] + cols = np.arange(num_cols) + + while True: + # generate indices to pick N values from f_A, f_B and f_C_i + _indices = np.random.randint(0, high=n_samples, size=samples.shape) + + yield samples[_indices, cols] + + @staticmethod + def bootstrap_sample_generator_3D(samples): + """Generate bootstrap samples. + + Generators are used to avoid copying the entire array. + + For example: + Let's say we a model with multiple outputs. + We use the same approach as in the 2D + case for each slice the 3D array. + Here, slices refer to the 'depth' of the array, + given by array.shape[0]. + + **Inputs:** + + * **samples** (`ndarray`): + Model evaluations for the samples. + Shape: `(n_outputs, n_samples, num_vars)`. + + **Outputs:** + + * `generator`: + Generator for the bootstrap samples. + + """ + n_samples = samples.shape[1] + array_shape = samples.shape[1:] + num_cols = samples.shape[2] + cols = np.arange(num_cols) + + while True: + _indices = np.random.randint(0, high=n_samples, size=array_shape) + + yield samples[:, _indices, cols] + + def bootstrapping( + self, + estimator, + estimator_inputs, + qoi_mean, + num_bootstrap_samples, + confidence_level=0.95, + **kwargs, + ): + + """An abstract method to implement bootstrapping. + + **Inputs:** + + * **estimator** (`function`): + A method/func which computes the statistical + quantities of interest (QoI). + Example: `compute_first_order_Sobol` + It must be a method/function that takes several `ndarray`s + of samples as input and returns a single `ndarray` of estimated value. + + * **estimator_inputs** (`list`): + Inputs to the estimator concantenated in a list. + + * **qoi_mean** (`ndarray`): + Mean of the QoI. + This is the value around which we + will compute the confidence interval. + Shape: `(n_qois, n_outputs)`. + + * **num_bootstrap_samples** (`int`): + Number of bootstrap samples to generate. + + * **confidence_level** (`float`): + Confidence level for the confidence interval. + Default: 0.95 + + **Outputs:** + + * **confidence_interval_qoi** (`ndarray`): + Confidence interval for the quantity of interest (QoI). + + """ + + n_qois = qoi_mean.shape[0] + n_outputs = qoi_mean.shape[1] + + ##################### STORAGE ##################### + + # store generators of the inputs for bootstrap sampling + input_generators = [] + + # store the qoi computed using bootstrap samples + bootstrapped_qoi = np.zeros((n_outputs, n_qois, num_bootstrap_samples)) + + # store the confidence interval for each qoi + confidence_interval_qoi = np.zeros((n_outputs, n_qois, 2)) + + ##################### CREATE GENERATORS ##################### + + for i, input in enumerate(estimator_inputs): + + if isinstance(input, np.ndarray): + + # Example: f_A or f_B of models with single output. + # Shape: `(n_samples, 1)`. + if input.ndim == 2 and input.shape[1] == 1: + input_generators.append(self.bootstrap_sample_generator_1D(input)) + + # Example: f_C_i or f_D_i of models with single output. + # Shape: `(n_samples, num_vars)`. + elif input.ndim == 2 and input.shape[1] > 1: + input_generators.append(self.bootstrap_sample_generator_2D(input)) + + # Example: f_C_i or f_D_i of models with multiple outputs. + # Shape: `(n_outputs, n_samples, num_vars)`. + elif input.ndim == 3: + input_generators.append(self.bootstrap_sample_generator_3D(input)) + + # Example: if models evals is None. 
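+            # (D_i_model_evals is None, for instance, when neither second
+            # order indices nor the Saltelli2002 total order scheme is requested.)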
+            elif input is None:
+                input_generators.append(input)
+
+            else:
+                raise ValueError(
+                    f"UQpy: estimator_inputs[{i}] should be either None or an `ndarray` of dimension 1, 2 or 3"
+                )
+
+        ################### BOOTSTRAPPING ##################
+
+        # Compute the qoi for each bootstrap sample
+        for j in range(num_bootstrap_samples):
+
+            # inputs to the estimator
+            args = []
+
+            # generate samples
+            for gen_input in input_generators:
+                if gen_input is None:
+                    args.append(gen_input)
+                else:
+                    args.append(next(gen_input))
+
+            bootstrapped_qoi[:, :, j] = estimator(*args, **kwargs).T
+
+        ################# CONFIDENCE INTERVAL ################
+
+        # Calculate confidence intervals
+        delta = -scipy.stats.norm.ppf((1 - confidence_level) / 2)
+
+        for output_j in range(n_outputs):
+
+            # estimate the standard deviation using the bootstrap samples
+            std_qoi = np.std(bootstrapped_qoi[output_j, :, :], axis=1, ddof=1)
+
+            lower_bound = qoi_mean[:, output_j] - delta * std_qoi
+            upper_bound = qoi_mean[:, output_j] + delta * std_qoi
+
+            confidence_interval_qoi[output_j, :, 0] = lower_bound
+            confidence_interval_qoi[output_j, :, 1] = upper_bound
+
+        # For models with a single output, return a 2D array.
+        if n_outputs == 1:
+            confidence_interval_qoi = confidence_interval_qoi[0, :, :]
+
+        return confidence_interval_qoi
diff --git a/src/UQpy/sensitivity/sobol.py b/src/UQpy/sensitivity/sobol.py
new file mode 100644
index 000000000..d4fb1de56
--- /dev/null
+++ b/src/UQpy/sensitivity/sobol.py
@@ -0,0 +1,933 @@
+"""
+
+The Sobol class computes the Sobol indices for single-output and multi-output
+models. The Sobol indices can be computed using various pick-and-freeze
+schemes.
+
+The schemes implemented are listed below:
+
+# First order indices:
+- Sobol1993 [1]: Requires n_samples*(num_vars + 1) model evaluations
+- Saltelli2002 [3]: Requires n_samples*(2*num_vars + 1) model evaluations
+- Janon2014 [4]: Requires n_samples*(num_vars + 1) model evaluations
+
+# Second order indices:
+- Saltelli2002 [3]: Requires n_samples*(2*num_vars + 1) model evaluations
+
+# Total order indices:
+- Homma1996: Requires n_samples*(num_vars + 1) model evaluations
+- Saltelli2002 [3]: Requires n_samples*(2*num_vars + 1) model evaluations
+
+For more details on "Saltelli2002" refer to [3].
+
+Note: Apart from second order indices, the Saltelli2002 scheme provides more
+      accurate estimates of all indices than Homma1996 or Sobol1993, because it
+      makes efficient use of the larger number of model evaluations.
+
+Additionally, we can compute the confidence intervals for the Sobol indices
+using bootstrapping [2].
+
+
+References
+----------
+
+.. [1] Sobol, I.M. (1993). Sensitivity Estimates for Nonlinear Mathematical Models.
+       Mathematical Modelling and Computational Experiments, 4, 407-414.
+
+.. [2] Jeremy Orloff and Jonathan Bloom (2014). Bootstrap confidence intervals,
+       Introduction to Probability and Statistics, MIT OCW.
+
+.. [3] Saltelli, A. (2002). Making best use of model evaluations to
+       compute sensitivity indices.
+
+.. [4] Janon, Alexandre; Klein, Thierry; Lagnoux, Agnès; Nodet, Maëlle;
+       Prieur, Clémentine. Asymptotic normality and efficiency of two Sobol index
+       estimators. ESAIM: Probability and Statistics, Volume 18 (2014), pp. 342-364.
+       doi:10.1051/ps/2013040.
http://www.numdam.org/articles/10.1051/ps/2013040/ + +""" + +import math +import logging +import itertools + +import numpy as np + +from UQpy.sensitivity.baseclass.sensitivity import Sensitivity +from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples +from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter + +# TODO: Sampling strategies + + +class Sobol(Sensitivity): + """ + Compute Sobol sensitivity indices using the pick + and freeze algorithm. For models with multiple outputs + (vector-valued response), the sensitivity indices are computed for each + output separately. + For time-series models, the sensitivity indices are computed for each + time instant separately. (Pointwise-in-time Sobol indices) + + **Inputs:** + + * **runmodel_object** (``RunModel`` object): + The computational model. It should be of type + ``RunModel`` (see ``RunModel`` class). + The output QoI can be a scalar or vector of + length `ny`, then the sensitivity indices of + all `ny` outputs are computed independently. + + * **dist_object** ((list of) ``Distribution`` object(s)): + List of ``Distribution`` objects corresponding + to each random variable, or ``JointInd`` object + (multivariate RV with independent marginals). + + * **random_state** (None or `int` or ``numpy.random.RandomState`` object): + Random seed used to initialize the + pseudo-random number generator. + Default is None. + + **Attributes:** + + * **sobol_i** (`ndarray`): + First order sensitivity indices. + Shape: `(num_vars, n_outputs)` + + * **sobol_total_i** (`ndarray`): + Total order sensitivity indices. + Shape: `(num_vars, n_outputs)` + + * **sobol_ij** (`ndarray`): + Second order sensitivity indices. + Shape: `(num_second_order_terms, n_outputs)` + + * **CI_sobol_i** (`ndarray`): + Confidence intervals for the first order sensitivity indices. + Shape: `(num_vars, 2)` + + if multioutput: Shape: `(n_outputs, num_vars, 2)` + + * **CI_sobol_total_i** (`ndarray`): + Confidence intervals for the total order sensitivity indices. + Shape: `(num_vars, 2)` + + if multioutput: Shape: `(n_outputs, num_vars, 2)` + + * **CI_sobol_ij** (`ndarray`): + Confidence intervals for the second order Sobol indices. + Shape: `(num_second_order_terms, 2)` + + if multioutput: Shape: `(n_outputs, num_second_order_terms, 2)` + + * **n_samples** (`int`): + Number of samples used to compute the sensitivity indices. + + * **num_vars** (`int`): + Number of model input variables. + + * **multioutput** (`bool`): + True if the model has multiple outputs. + + **Methods:** + """ + + def __init__( + self, runmodel_object, dist_object, random_state=None, **kwargs + ) -> None: + + super().__init__(runmodel_object, dist_object, random_state, **kwargs) + + # Create logger with the same name as the class + self.logger = logging.getLogger(__name__) + self.logger.setLevel(logging.ERROR) + frmt = UQpyLoggingFormatter() + + # create console handler with a higher log level + ch = logging.StreamHandler() + ch.setFormatter(frmt) + + # add the handler to the logger + self.logger.addHandler(ch) + + def run( + self, + n_samples=1_000, + num_bootstrap_samples=None, + confidence_level=0.95, + estimate_second_order=False, + first_order_scheme="Janon2014", + total_order_scheme="Homma1996", + second_order_scheme="Saltelli2002", + ): + + """ + Compute the sensitivity indices and confidence intervals. + + **Inputs:** + + * **n_samples** (`int`): + Number of samples used to compute the sensitivity indices. + Default is 1,000. 
+
+        * **num_bootstrap_samples** (`int`):
+            Number of bootstrap samples used to compute
+            the confidence intervals.
+            Default is None.
+
+        * **confidence_level** (`float`):
+            Confidence level of the bootstrap confidence intervals.
+            Default is 0.95.
+
+        * **estimate_second_order** (`bool`):
+            If True, compute the second order sensitivity indices.
+            Default is False.
+
+        * **first_order_scheme** (`str`):
+            Scheme used to compute the first order Sobol indices.
+            Default is "Janon2014".
+
+        * **total_order_scheme** (`str`):
+            Scheme used to compute the total order Sobol indices.
+            Default is "Homma1996".
+
+        * **second_order_scheme** (`str`):
+            Scheme used to compute the second order Sobol indices.
+            Default is "Saltelli2002".
+
+        **Outputs:**
+
+        * **computed_indices** (`dict`):
+            Dictionary containing the computed sensitivity indices.
+
+        * **sobol_i** (`ndarray`):
+            First order Sobol indices.
+            Shape: `(num_vars, n_outputs)`
+
+        * **sobol_total_i** (`ndarray`):
+            Total order Sobol indices.
+            Shape: `(num_vars, n_outputs)`
+
+        * **sobol_ij** (`ndarray`):
+            Second order Sobol indices.
+            Shape: `(num_second_order_terms, n_outputs)`
+
+        * **CI_sobol_i** (`ndarray`):
+            Confidence intervals for the first order Sobol indices.
+            Shape: `(num_vars, 2)`
+
+            if multioutput: Shape: `(n_outputs, num_vars, 2)`
+
+        * **CI_sobol_total_i** (`ndarray`):
+            Confidence intervals for the total order Sobol indices.
+            Shape: `(num_vars, 2)`
+
+            if multioutput: Shape: `(n_outputs, num_vars, 2)`
+
+        * **CI_sobol_ij** (`ndarray`):
+            Confidence intervals for the second order Sobol indices.
+            Shape: `(num_second_order_terms, 2)`
+
+            if multioutput: Shape: `(n_outputs, num_second_order_terms, 2)`
+
+        """
+        # Check n_samples data type
+        self.n_samples = n_samples
+        if not isinstance(self.n_samples, int):
+            raise TypeError("UQpy: n_samples should be an integer.")
+
+        # Check num_bootstrap_samples data type
+        if num_bootstrap_samples is not None:
+            if not isinstance(num_bootstrap_samples, int):
+                raise TypeError("UQpy: num_bootstrap_samples should be an integer.")
+        else:
+            self.logger.info(
+                "UQpy: num_bootstrap_samples is set to None, confidence intervals will not be computed."
+ ) + + ################## GENERATE SAMPLES ################## + + ( + A_samples, + B_samples, + C_i_generator, + D_i_generator, + ) = generate_pick_freeze_samples( + self.dist_object, self.n_samples, self.random_state + ) + + self.logger.info("UQpy: Generated samples using the pick-freeze scheme.") + + self.num_vars = A_samples.shape[1] # Number of variables + + ################# MODEL EVALUATIONS #################### + + A_model_evals = self._run_model(A_samples) # shape: (n_samples, n_outputs) + + self.logger.info("UQpy: Model evaluations A completed.") + + B_model_evals = self._run_model(B_samples) # shape: (n_samples, n_outputs) + + self.logger.info("UQpy: Model evaluations B completed.") + + # Check the number of outputs of the model + try: + self.n_outputs = A_model_evals.shape[1] + except: + self.n_outputs = 1 + + # multioutput flag + self.multioutput = True if self.n_outputs > 1 else False + + if not self.multioutput: + A_model_evals = A_model_evals.reshape(-1, 1) + B_model_evals = B_model_evals.reshape(-1, 1) + + C_i_model_evals = np.zeros((self.n_outputs, self.n_samples, self.num_vars)) + + for i, C_i in enumerate(C_i_generator): + C_i_model_evals[:, :, i] = self._run_model(C_i).T + + self.logger.info("UQpy: Model evaluations C completed.") + + # Compute D_i_model_evals only if needed + if estimate_second_order or total_order_scheme == "Saltelli2002": + + D_i_model_evals = np.zeros((self.n_outputs, self.n_samples, self.num_vars)) + + for i, D_i in enumerate(D_i_generator): + D_i_model_evals[:, :, i] = self._run_model(D_i).T + + self.logger.info("UQpy: Model evaluations D completed.") + + else: + D_i_model_evals = None + + self.logger.info("UQpy: All model evaluations computed successfully.") + + ######################### STORAGE ######################## + + # Create dictionary to store the sensitivity indices + computed_indices = {} + + ################## COMPUTE SOBOL INDICES ################## + + # First order Sobol indices + self.sobol_i = compute_first_order( + A_model_evals, + B_model_evals, + C_i_model_evals, + D_i_model_evals, + scheme=first_order_scheme, + ) + + self.logger.info("UQpy: First order Sobol indices computed successfully.") + + computed_indices["sobol_i"] = self.sobol_i + + # Total order Sobol indices + self.sobol_total_i = compute_total_order( + A_model_evals, + B_model_evals, + C_i_model_evals, + D_i_model_evals, + scheme=total_order_scheme, + ) + + self.logger.info("UQpy: Total order Sobol indices computed successfully.") + + computed_indices["sobol_total_i"] = self.sobol_total_i + + if estimate_second_order: + + # Second order Sobol indices + self.sobol_ij = compute_second_order( + A_model_evals, + B_model_evals, + C_i_model_evals, + D_i_model_evals, + computed_indices["sobol_i"], + scheme=second_order_scheme, + ) + + self.logger.info("UQpy: Second order Sobol indices computed successfully.") + + computed_indices["sobol_ij"] = self.sobol_ij + + ################## CONFIDENCE INTERVALS #################### + + if num_bootstrap_samples is not None: + + self.logger.info("UQpy: Computing confidence intervals ...") + + estimator_inputs = [ + A_model_evals, + B_model_evals, + C_i_model_evals, + D_i_model_evals, + ] + + # First order Sobol indices + self.CI_sobol_i = self.bootstrapping( + compute_first_order, + estimator_inputs, + computed_indices["sobol_i"], + num_bootstrap_samples, + confidence_level, + scheme=first_order_scheme, + ) + + self.logger.info( + "UQpy: Confidence intervals for First order Sobol indices computed successfully." 
+ ) + + computed_indices["CI_sobol_i"] = self.CI_sobol_i + + # Total order Sobol indices + self.CI_sobol_total_i = self.bootstrapping( + compute_total_order, + estimator_inputs, + computed_indices["sobol_total_i"], + num_bootstrap_samples, + confidence_level, + scheme=total_order_scheme, + ) + + self.logger.info( + "UQpy: Confidence intervals for Total order Sobol indices computed successfully." + ) + + computed_indices["CI_sobol_total_i"] = self.CI_sobol_total_i + + # Second order Sobol indices + if estimate_second_order: + self.CI_sobol_ij = self.bootstrapping( + compute_second_order, + estimator_inputs, + computed_indices["sobol_ij"], + num_bootstrap_samples, + confidence_level, + first_order_sobol=computed_indices["sobol_i"], + scheme=second_order_scheme, + ) + + self.logger.info( + "UQpy: Confidence intervals for Second order Sobol indices computed successfully." + ) + + computed_indices["CI_sobol_ij"] = self.CI_sobol_ij + + return computed_indices + + +###################### Pick and Freeze Methods ##################### + +""" + +These methods are also called by other sensitivity methods (such as Chatterjee, +Cramer-von Mises) to estimate the Sobol indices and therefore are implemented as +functions and not static methods in the Sobol class. + + +#! Saltelli2002 +-------------------------------------------------------------------------------- + +Sobol indices estimated as per Theorem 2 in [3]_. Refer page 7 in +[3]_ for details. + +Since there are several sets of function evaluations available, +there are several ways to estimate E[Y]^2 and V[Y]. +Below we summarise the evaluations to be used as given in Theorem 2. + +# First-order indices: + - E[Y]^2 : f_A, f_B + - V[Y] : f_A + - S_i = ( /N - E[Y]^2 ) / V[Y] + + +# Second-order indices: + - Estimate 1: + - E[Y]^2 : f_C_l, f_D_l -> l = max(i,j) + - V[Y] : f_C_j or f_D_i + - V^c_ij = f_D_i, f_C_j + + - Estimate 2: + - E[Y]^2: f_C_l, f_D_l -> l = min(i,j) + - V[Y] : f_C_i or f_D_j + - V^c_ij = f_D_j, f_C_i + + where: + S_ij = S^c_ij - S_i - S_j + S^c_ij = ( /N - E[Y]^2 ) / V[Y] # Esimate 1 + = ( /N - E[Y]^2 ) / V[Y] # Esimate 2 + +# Total-order indices: + - E[Y]^2 : f_B + - V[Y] : f_B + - S_T_i = 1 - ( /N - E[Y]^2 ) / V[Y] + +For m=5, the Sobol indices are estimated as follows: +First order indices: 2 estimates +Second order indices: 2 estimates +Total order indices: 2 estimates +S_{-ij}: 2 estimates ++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+ +| | f_B | f_C_1 | f_C_2 | f_C_3 | f_C_4 | f_C_5 | f_D_1 | f_D_2 | f_D_3 | f_D_4 | f_D_5 | f_A | ++=======+========+=========+=========+=========+=========+========+=========+=========+=========+=========+=======+======+ +| f_B | V[Y] | | | | | | | | | | | | ++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+ +| f_C_1 | S_T_1 | V[Y] | | | | | | | | | | | ++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+ +| f_C_2 | S_T_2 | V^c_-12 | V[Y] | | | | | | | | | | ++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+ +| f_C_3 | S_T_3 | V^c_-13 | V^c_-23 | V[Y] | | | | | | | | | ++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+ +| f_C_4 | S_T_4 | V^c_-14 | V^c_-24 | V^c_-34 | V[Y] | | | | | | | | 
++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+ +| f_C_5 | S_T_5 | V^c_-15 | V^c_-25 | V^c_-35 | V^c_-45 | V[Y] | | | | | | | ++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+ +| f_D_1 | S_1 | E^2[Y] | V^c_12 | V^c_13 | V^c_14 | V^c_15 | V[Y] | | | | | | ++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+ +| f_D_2 | S_2 | V^c_12 | E^2[Y] | V^c_23 | V^c_24 | V^c_25 | V^c_-12 | V[Y] | | | | | ++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+ +| f_D_3 | S_3 | V^c_13 | V^c_23 | E^2[Y] | V^c_34 | V^c_35 | V^c_-13 | V^c_-23 | V[Y] | | | | ++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+ +| f_D_4 | S_4 | V^c_14 | V^c_24 | V^c_34 | E^2[Y] | V^c_45 | V^c_-14 | V^c_-24 | V^c_-34 | V[Y] | | | ++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+ +| f_D_5 | S_5 | V^c_15 | V^c_25 | V^c_35 | V^c_45 | E^2[Y] | V^c_-15 | V^c_-25 | V^c_-35 | V^c_-45 | V[Y] | | ++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+ +| f_A | E^2[Y] | S_1 | S_2 | S_3 | S_4 | S_5 | S_T_1 | S_T_2 | S_T_3 | S_T_4 | S_T_5 | V[Y] | ++-------+--------+---------+---------+---------+---------+--------+---------+---------+---------+---------+-------+------+ + +For m>5, we can follow the same procedure as above. + +For m = 4, the Sobol indices are estimated as follows: +First order indices: 2 estimates +Second order indices: 4 estimates +Total order indices: 2 estimates ++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+ +| | f_B | f_C_1 | f_C_2 | f_C_3 | f_C_4 | f_D_1 | f_D_2 | f_D_3 | f_D_4 | f_A | ++=======+========+========+========+========+========+========+========+========+=======+======+ +| f_B | V[Y] | | | | | | | | | | ++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+ +| f_C_1 | S_T_1 | V[Y] | | | | | | | | | ++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+ +| f_C_2 | S_T_2 | V^c_34 | V[Y] | | | | | | | | ++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+ +| f_C_3 | S_T_3 | V^c_24 | V^c_14 | V[Y] | | | | | | | ++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+ +| f_C_4 | S_T_4 | V^c_23 | V^c_13 | V^c_12 | V[Y] | | | | | | ++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+ +| f_D_1 | S_1 | E^2[Y] | V^c_12 | V^c_13 | V^c_14 | V[Y] | | | | | ++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+ +| f_D_2 | S_2 | V^c_12 | E^2[Y] | V^c_23 | V^c_24 | V^c_34 | V[Y] | | | | ++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+ +| f_D_3 | S_3 | V^c_13 | V^c_23 | E^2[Y] | V^c_34 | V^c_25 | V^c_14 | V[Y] | | | ++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+ +| f_D_4 | S_4 | V^c_14 | V^c_24 | V^c_34 | E^2[Y] | V^c_23 | V^c_13 | V^c_12 | V[Y] | | 
++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+ +| f_A | E^2[Y] | S_1 | S_2 | S_3 | S_4 | S_T_1 | S_T_2 | S_T_3 | S_T_4 | V[Y] | ++-------+--------+--------+--------+--------+--------+--------+--------+--------+-------+------+ + +For m = 3, the Sobol indices are estimated as follows: +First order indices: 4 estimates +Second order indices: 2 estimates +Total order indices: 2 estimates ++-------+--------+--------+--------+--------+-------+-------+-------+------+ +| | f_B | f_C_1 | f_C_2 | f_C_3 | f_D_1 | f_D_2 | f_D_3 | f_A | ++=======+========+========+========+========+=======+=======+=======+======+ +| f_B | V[Y] | | | | | | | | ++-------+--------+--------+--------+--------+-------+-------+-------+------+ +| f_C_1 | S_T_1 | V[Y] | | | | | | | ++-------+--------+--------+--------+--------+-------+-------+-------+------+ +| f_C_2 | S_T_2 | S_3 | V[Y] | | | | | | ++-------+--------+--------+--------+--------+-------+-------+-------+------+ +| f_C_3 | S_T_3 | S_2 | S_1 | V[Y] | | | | | ++-------+--------+--------+--------+--------+-------+-------+-------+------+ +| f_D_1 | S_1 | E^2[Y] | V^c_12 | V^c_13 | V[Y] | | | | ++-------+--------+--------+--------+--------+-------+-------+-------+------+ +| f_D_2 | S_2 | V^c_12 | E^2[Y] | V^c_23 | S_3 | V[Y] | | | ++-------+--------+--------+--------+--------+-------+-------+-------+------+ +| f_D_3 | S_3 | V^c_13 | V^c_23 | E^2[Y] | S_2 | S_1 | V[Y] | | ++-------+--------+--------+--------+--------+-------+-------+-------+------+ +| f_A | E^2[Y] | S_1 | S_2 | S_3 | S_T_1 | S_T_2 | S_T_3 | V[Y] | ++-------+--------+--------+--------+--------+-------+-------+-------+------+ + +""" + + +def compute_first_order( + A_model_evals, + B_model_evals, + C_i_model_evals, + D_i_model_evals=None, + scheme="Janon2014", +): + + """ + Compute first order Sobol' indices using the Pick-and-Freeze scheme. + + For the Sobol1996 scheme: + For computing the first order Sobol' indices, only f_A_model_evals and + f_C_i_model_evals are required. The other inputs are optional. + f_B_model_evals is set to None if f_B_model_evals is not provided. + + **Inputs:** + + * **A_model_evals** (`ndarray`): + Shape: `(n_samples, n_outputs)`. + + * **B_model_evals** (`ndarray`): + If not available, pass `None`. + Shape: `(n_samples, n_outputs)`. + + * **C_i_model_evals** (`ndarray`): + Shape: `(n_outputs, n_samples, num_vars)`. + + * **D_i_model_evals** (`ndarray`, optional): + Shape: `(n_outputs, n_samples, num_vars)`. + + * **scheme** (`str`, optional): + Scheme to use for computing the first order Sobol' indices. + Default: 'Sobol1993'. + + **Outputs:** + + * **first_order_sobol** (`ndarray`): + First order Sobol' indices. + Shape: `(num_vars, n_outputs)`. 
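+
+    In essence, every scheme below computes, for each variable `i` and each
+    model output, an estimate of the form
+
+        S_i = ( <f_A, f_C_i>/N - E[Y]^2 ) / V[Y]
+
+    and the schemes differ only in which sets of model evaluations are used
+    to estimate E[Y]^2 and V[Y] (see the module-level notes above).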
+ + """ + + n_samples = A_model_evals.shape[0] + n_outputs = A_model_evals.shape[1] + num_vars = C_i_model_evals.shape[2] + + # Store first order Sobol' indices + first_order_sobol = np.zeros((num_vars, n_outputs)) + + if scheme == "Sobol1993": + + for output_j in range(n_outputs): + + f_A = A_model_evals[:, output_j] + f_B = B_model_evals[:, output_j] if B_model_evals is not None else None + + # combine all model evaluations + # to improve accuracy of the estimator + _all_model_evals = np.append(f_A, f_B) if f_B is not None else f_A + f_0 = np.mean(_all_model_evals) # scalar + + f_0_square = f_0**2 + total_variance = np.var(_all_model_evals, ddof=1) + + for var_i in range(num_vars): + + f_C_i = C_i_model_evals[output_j, :, var_i] + + S_i = (np.dot(f_A, f_C_i) / n_samples - f_0_square) / total_variance + + first_order_sobol[var_i, output_j] = S_i + + elif scheme == "Janon2014": + + for output_j in range(n_outputs): + + f_A = A_model_evals[:, output_j] + + for var_i in range(num_vars): + + f_C_i = C_i_model_evals[output_j, :, var_i] + + # combine all model evaluations + # to improve accuracy of the estimator + _all_model_evals = np.append(f_A, f_C_i) + f_0 = np.mean(_all_model_evals) + + f_0_square = f_0**2 + total_variance = np.mean(_all_model_evals**2) - f_0_square + + S_i = (np.dot(f_A, f_C_i) / n_samples - f_0_square) / total_variance + + first_order_sobol[var_i, output_j] = S_i + + elif scheme == "Saltelli2002": + + """ + Number of estimates for first order indices is 4 if + num_vars is 3, else 2. + + """ + + for output_j in range(n_outputs): + + f_A = A_model_evals[:, output_j] + f_B = B_model_evals[:, output_j] + f_0_square = np.dot(f_A, f_B) / n_samples + total_variance = np.var(f_A, ddof=1) + + for var_i in range(num_vars): + + f_C_i = C_i_model_evals[output_j, :, var_i] + f_D_i = D_i_model_evals[output_j, :, var_i] + + # (Estimate 1) + est_1 = (np.dot(f_A, f_C_i) / n_samples - f_0_square) / total_variance + + # (Estimate 2) + est_2 = (np.dot(f_B, f_D_i) / n_samples - f_0_square) / total_variance + + if num_vars == 3: + + # list of variable indices + list_vars = list(range(num_vars)) + list_vars.remove(var_i) + # combination of all remaining variables indices + rem_vars_perm = list(itertools.permutations(list_vars, 2)) + + # (Estimate 3) + var_a, var_b = rem_vars_perm[0] + f_C_a = C_i_model_evals[output_j, :, var_a] + f_C_b = C_i_model_evals[output_j, :, var_b] + est_3 = ( + np.dot(f_C_a, f_C_b) / n_samples - f_0_square + ) / total_variance + + # (Estimate 4) + var_a, var_b = rem_vars_perm[1] + f_D_a = D_i_model_evals[output_j, :, var_a] + f_D_b = D_i_model_evals[output_j, :, var_b] + est_4 = ( + np.dot(f_D_a, f_D_b) / n_samples - f_0_square + ) / total_variance + + first_order_sobol[var_i, output_j] = ( + est_1 + est_2 + est_3 + est_4 + ) / 4 + + else: + first_order_sobol[var_i, output_j] = (est_1 + est_2) / 2 + + return first_order_sobol + + +def compute_total_order( + A_model_evals, + B_model_evals, + C_i_model_evals, + D_i_model_evals=None, + scheme="Homma1996", +): + + """ + Compute total order Sobol' indices using the Pick-and-Freeze scheme. + + For the Homma1996 scheme: + For computing the first order Sobol' indices, only f_B_model_evals and + f_C_i_model_evals are required. + f_A_model_evals is set to None if f_A_model_evals is not provided. + + **Inputs:** + + * **A_model_evals** (`ndarray`): + If not available, pass `None`. + Shape: `(n_samples, n_outputs)`. + + * **B_model_evals** (`ndarray`): + Shape: `(n_samples, n_outputs)`. 
+ + * **C_i_model_evals** (`ndarray`): + Shape: `(n_outputs, n_samples, num_vars)`. + + * **D_i_model_evals** (`ndarray`, optional): + Shape: `(n_outputs, n_samples, num_vars)`. + + * **scheme** (`str`, optional): + Scheme to use for computing the total order Sobol' indices. + Default: 'Homma1996'. + + **Outputs:** + + * **total_order_sobol** (`ndarray`): + Total order Sobol' indices. + Shape: `(num_vars, n_outputs)`. + + """ + + n_samples = A_model_evals.shape[0] + n_outputs = A_model_evals.shape[1] + num_vars = C_i_model_evals.shape[2] + + # Store total order Sobol' indices + total_order_sobol = np.zeros((num_vars, n_outputs)) + + if scheme == "Homma1996": + + for output_j in range(n_outputs): + + f_A = A_model_evals[:, output_j] if A_model_evals is not None else None + f_B = B_model_evals[:, output_j] + + # combine all model evaluations + # to improve accuracy of the estimator + _all_model_evals = np.append(f_A, f_B) if f_A is not None else f_B + f_0 = np.mean(_all_model_evals) # scalar + + f_0_square = f_0**2 + total_variance = np.var(_all_model_evals, ddof=1) + + for var_i in range(num_vars): + + f_C_i = C_i_model_evals[output_j, :, var_i] + + S_T_i = ( + 1 - (np.dot(f_B, f_C_i) / n_samples - f_0_square) / total_variance + ) + + total_order_sobol[var_i, output_j] = S_T_i + + elif scheme == "Saltelli2002": + + for output_j in range(n_outputs): + + f_A = A_model_evals[:, output_j] + f_B = B_model_evals[:, output_j] + f_0_square = np.mean(f_B) ** 2 + total_variance = np.var(f_B, ddof=1) + + for var_i in range(num_vars): + + f_C_i = C_i_model_evals[output_j, :, var_i] + f_D_i = D_i_model_evals[output_j, :, var_i] + + # (Estimate 1) + est_1 = ( + 1 - (np.dot(f_B, f_C_i) / n_samples - f_0_square) / total_variance + ) + + # (Estimate 2) + est_2 = ( + 1 - (np.dot(f_A, f_D_i) / n_samples - f_0_square) / total_variance + ) + + total_order_sobol[var_i, output_j] = (est_1 + est_2) / 2 + + return total_order_sobol + + +def compute_second_order( + A_model_evals, + B_model_evals, + C_i_model_evals, + D_i_model_evals, + first_order_sobol=None, # None to make it a make keyword argument + scheme="Saltelli2002", +): + """ + Compute the second order Sobol indices using the Pick-and-Freeze scheme. + + NOTE: + - Number of estimates for second order indices is 4 if + num_vars is 4, else 2. + + - Although the B_model_evals are not being used currently, they are + included for use in estimate 3 and 4 for case num_vars = 4. + + **Inputs:** + + * **A_model_evals** (`ndarray`): + Shape: `(n_samples, n_outputs)`. + + * **B_model_evals** (`ndarray`): + Shape: `(n_samples, n_outputs)`. + + * **C_i_model_evals** (`ndarray`): + Shape: `(n_outputs, n_samples, num_vars)`. + + * **D_i_model_evals** (`ndarray`, optional): + Shape: `(n_outputs, n_samples, num_vars)`. + + * **first_order_sobol** (`ndarray`): + First order Sobol' indices. + Shape: `(num_vars, n_outputs)`. + + * **scheme** (`str`, optional): + Scheme to use for computing the first order Sobol' indices. + Default: 'Sobol1993'. + + **Outputs:** + + * **second_order_sobol** (`ndarray`): + Second order Sobol indices. + Shape: `(num_second_order_terms, n_outputs)`. 
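+
+    Sketch of the Saltelli2002 estimate implemented below: with
+    l = max(i, j) for estimate 1 and l = min(i, j) for estimate 2,
+
+        E[Y]^2 ~ <f_C_l, f_D_l>/N
+        S^c_ij = ( <f_C_i, f_D_j>/N - E[Y]^2 ) / V[Y]
+        S_ij   = S^c_ij - S_i - S_j
+
+    (estimate 2 swaps the roles of C and D) and the two estimates are
+    averaged.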
+ """ + + n_samples = A_model_evals.shape[0] + n_outputs = A_model_evals.shape[1] + num_vars = C_i_model_evals.shape[2] + + second_order_terms = itertools.combinations(range(num_vars), 2) + second_order_terms = list(second_order_terms) + num_second_order_terms = math.comb(num_vars, 2) + + # Store second order Sobol' indices + second_order_sobol = np.zeros((num_second_order_terms, n_outputs)) + + if scheme == "Saltelli2002": + + for output_j in range(n_outputs): + + for k in range(num_second_order_terms): + + var_a, var_b = second_order_terms[k] + S_a = first_order_sobol[var_a, output_j] + S_b = first_order_sobol[var_b, output_j] + + # (Estimate 1) + var_c = np.max([var_a, var_b]) + f_C_c = C_i_model_evals[output_j, :, var_c] + f_D_c = D_i_model_evals[output_j, :, var_c] + f_0_square = np.dot(f_D_c, f_C_c) / n_samples + total_variance = np.var(f_D_c, ddof=1) + + f_C_a = C_i_model_evals[output_j, :, var_a] + f_D_b = D_i_model_evals[output_j, :, var_b] + S_c_ab_1 = ( + np.dot(f_C_a, f_D_b) / n_samples - f_0_square + ) / total_variance + + est_1 = S_c_ab_1 - S_a - S_b + + # (Estimate 2) + var_c = np.min([var_a, var_b]) + f_C_c = C_i_model_evals[output_j, :, var_c] + f_D_c = D_i_model_evals[output_j, :, var_c] + f_0_square = np.dot(f_D_c, f_C_c) / n_samples + total_variance = np.var(f_D_c, ddof=1) + + f_D_a = D_i_model_evals[output_j, :, var_a] + f_C_b = C_i_model_evals[output_j, :, var_b] + S_c_ab_2 = ( + np.dot(f_D_a, f_C_b) / n_samples - f_0_square + ) / total_variance + + est_2 = S_c_ab_2 - S_a - S_b + + if num_vars == 4: + + # (Estimate 3) + # TODO: How to compute this? + + # (Estimate 4) + # TODO: How to compute this? + + # second_order_sobol[k, output_j] = ( + # est_1 + est_2 + est_3 + est_4 + # ) / 4 + + pass + + else: + second_order_sobol[k, output_j] = (est_1 + est_2) / 2 + + return second_order_sobol From 18ed2f2fe4e6dbb446142c9e8b1255906a382b45 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Sat, 7 May 2022 16:07:23 +0200 Subject: [PATCH 02/88] Add unit tests for sobol & sensitivity baseclass --- tests/unit_tests/sensitivity/ishigami.py | 17 + tests/unit_tests/sensitivity/sobol_func.py | 35 ++ .../unit_tests/sensitivity/test_baseclass.py | 234 ++++++++++ tests/unit_tests/sensitivity/test_sobol.py | 399 ++++++++++++++++++ 4 files changed, 685 insertions(+) create mode 100644 tests/unit_tests/sensitivity/ishigami.py create mode 100644 tests/unit_tests/sensitivity/sobol_func.py create mode 100644 tests/unit_tests/sensitivity/test_baseclass.py create mode 100644 tests/unit_tests/sensitivity/test_sobol.py diff --git a/tests/unit_tests/sensitivity/ishigami.py b/tests/unit_tests/sensitivity/ishigami.py new file mode 100644 index 000000000..41ec55149 --- /dev/null +++ b/tests/unit_tests/sensitivity/ishigami.py @@ -0,0 +1,17 @@ +""" +Auxiliary file +============================================== +""" + +import numpy as np + +def evaluate(X, params=[7, 0.1]): + """Non-monotonic Ishigami-Homma three parameter test function""" + + a = params[0] + b = params[1] + + Y = np.sin(X[:, 0]) + a * np.power(np.sin(X[:, 1]), 2) + \ + b * np.power(X[:, 2], 4) * np.sin(X[:, 0]) + + return Y diff --git a/tests/unit_tests/sensitivity/sobol_func.py b/tests/unit_tests/sensitivity/sobol_func.py new file mode 100644 index 000000000..af1636315 --- /dev/null +++ b/tests/unit_tests/sensitivity/sobol_func.py @@ -0,0 +1,35 @@ +import numpy as np +import copy + + +def evaluate(X, a_values): + + dims = len(a_values) + g = 1 + + for i in range(dims): + g_i = (np.abs(4 * X[:, i] - 2) + a_values[i]) / (1 + 
a_values[i]) + g *= g_i + + return g + + +def sensitivities(a_values): + + dims = len(a_values) + + Total_order = np.zeros((dims, 1)) + + V_i = (3 * (1 + a_values) ** 2) ** (-1) + + total_variance = np.prod(1 + V_i) - 1 + + First_order = V_i / total_variance + + for i in range(dims): + + rem_First_order = copy.deepcopy(V_i) + rem_First_order[i] = 0 + Total_order[i] = V_i[i] * np.prod(rem_First_order + 1) / total_variance + + return First_order.reshape(-1, 1), Total_order diff --git a/tests/unit_tests/sensitivity/test_baseclass.py b/tests/unit_tests/sensitivity/test_baseclass.py new file mode 100644 index 000000000..458826d6f --- /dev/null +++ b/tests/unit_tests/sensitivity/test_baseclass.py @@ -0,0 +1,234 @@ +""" +This module is used to test the functionalities of the baseclass. + +- test_pick_and_freeze_sampling: + Test the `generate_pick_and_test_samples` function. +- test_bootstrap_for_vector: + Test the bootstrap sampling for a vector. +- test_bootstrap_for_matrix: + Test the bootstrap sampling for a matrix. + +""" + +import numpy as np +import pytest + +from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.PythonModel import PythonModel +from UQpy.distributions import Uniform +from UQpy.distributions.collection.JointIndependent import JointIndependent +from UQpy.sensitivity.sobol import Sobol +from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples + +# Prepare +############################################################################### + +# Prepare the input distribution +@pytest.fixture() +def ishigami_input_dist_object(): + """ + This function returns the input distribution for the Ishigami function. + + X1 ~ Uniform(-pi, pi) + X2 ~ Uniform(-pi, pi) + X3 ~ Uniform(-pi, pi) + + """ + return JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3) + + +@pytest.fixture() +def ishigami_model_object(): + """This function creates the Ishigami run_model_object""" + model = PythonModel( + model_script="ishigami.py", + model_object_name="evaluate", + var_names=[r"$X_1$", "$X_2$", "$X_3$"], + delete_files=True, + params=[7, 0.1], + ) + + runmodel_obj = RunModel(model=model) + + return runmodel_obj + + +@pytest.fixture() +def sobol_object(ishigami_model_object, ishigami_input_dist_object): + """This function returns the Sobol object.""" + + return Sobol(ishigami_model_object, ishigami_input_dist_object) + + +@pytest.fixture() +def sobol_object_input_samples_small(sobol_object): + """This creates the Sobol object.""" + + SA = sobol_object + + np.random.seed(12345) # set seed for reproducibility + + SA.n_samples = 2 + + return generate_pick_freeze_samples(SA.dist_object, SA.n_samples) + + +# Generate N pick and free samples +@pytest.fixture() +def pick_and_freeze_samples_small(): + """ + This function returns input matrices A, B and C_i with a small number + of samples for the Ishigami input distribution. + This is used to test the `generate_pick_and_freeze_samples` function. + + The samples are generated as follows: + + dist_1 = JointInd([Uniform(-np.pi, 2*np.pi)]*3) + + np.random.seed(12345) #! 
set seed for reproducibility + + n_samples = 2 + n_vars = 3 + + samples = dist_1.rvs(n_samples*2) + + # Split samples + A_samples = samples[:n_samples, :] + B_samples = samples[n_samples:, :] + + def _get_C_i(i, A, B): + C_i = copy.deepcopy(B) + C_i[:, i] = A[:, i] + return C_i + + C_samples = np.zeros((n_vars, n_samples, n_vars)) + + for i in range(3): + C_samples[i, :, :] = _get_C_i(i, A_samples, B_samples) + + print(np.around(A_samples,3)) + print(np.around(B_samples,3)) + print(np.around(C_samples,3)) + + """ + + A_samples = np.array([[2.699, 0.426, 1.564], [-1.154, 0.600, 0.965]]) + + B_samples = np.array([[-1.986, 2.919, 1.556], [-1.856, 0.962, 2.898]]) + + C_samples = np.array( + [ + [[2.699, 2.919, 1.556], [-1.154, 0.962, 2.898]], + [[-1.986, 0.426, 1.556], [-1.856, 0.6, 2.898]], + [[-1.986, 2.919, 1.564], [-1.856, 0.962, 0.965]], + ] + ) + + return A_samples, B_samples, C_samples + + +@pytest.fixture() +def random_f_A(): + """This function returns an A-like vector""" + + rand_f_A = np.array([[100], [101], [102], [103], [104]]) + + return rand_f_A + + +@pytest.fixture() +def random_f_C_i(): + """This function returns a C_i-like vector""" + + rand_f_C_i = np.array([[100, 200], [101, 201], [102, 202], [103, 203], [104, 204]]) + return rand_f_C_i + + +@pytest.fixture() +def manual_bootstrap_samples_f_A(): + """This function bootstraps the A-like vector using random indices""" + + # Genrated using np.random.randint(low=0, high=5, size=(5,1)) + # with np.random.seed(12345) + # rand_indices_f_A = np.array([ [2], + # [1], + # [4], + # [1], + # [2]]) + + # bootstrap_f_A = rand_f_A[rand_indices_A] + bootstrap_sample_A = np.array([[102], [101], [104], [101], [102]]) + + return bootstrap_sample_A + + +@pytest.fixture() +def manual_bootstrap_samples_f_C_i(): + """This function bootstraps the C_i-like vector using random indices""" + + # Genrated using np.random.randint(low=0, high=5, size=(5,2)) + # with np.random.seed(12345) + # rand_indices_C_i = np.array([ [2, 1], + # [4, 1], + # [2, 1], + # [1, 3], + # [1, 3]]) + + bootstrap_f_C_i = np.array( + [[102, 201], [104, 201], [102, 201], [101, 203], [101, 203]] + ) + + return bootstrap_f_C_i + + +# Unit tests +############################################################################### + + +def test_pick_and_freeze_sampling( + pick_and_freeze_samples_small, sobol_object_input_samples_small +): + + """Test the `generate_pick_and_test_samples` function.""" + + # Prepare + A_samples, B_samples, C_samples = pick_and_freeze_samples_small + A_test, B_test, C_test_generator, _ = sobol_object_input_samples_small + + # Act + assert np.allclose(A_samples, np.around(A_test, 3)) + assert np.allclose(B_samples, np.around(B_test, 3)) + + for i in range(3): + C_test = next(C_test_generator) + assert np.allclose(C_samples[i, :, :], np.around(C_test, 3)) + + +def test_bootstrap_for_vector(random_f_A, manual_bootstrap_samples_f_A): + + """Test the bootstrap sampling for a vector.""" + + # Prepare + np.random.seed(12345) #! set seed for reproducibility + + gen_f_A = Sobol.bootstrap_sample_generator_1D(random_f_A) + + bootstrap_samples_f_A = next(gen_f_A) + + # Act + assert np.array_equal(manual_bootstrap_samples_f_A, bootstrap_samples_f_A) + + +def test_bootstrap_for_matrix(random_f_C_i, manual_bootstrap_samples_f_C_i): + + """Test the bootstrap sampling for a matrix.""" + + # Prepare + np.random.seed(12345) #! 
set seed for reproducibility + + gen_f_C_i = Sobol.bootstrap_sample_generator_2D(random_f_C_i) + + bootstrap_samples_C_i = next(gen_f_C_i) + + # Act + assert np.array_equal(manual_bootstrap_samples_f_C_i, bootstrap_samples_C_i) diff --git a/tests/unit_tests/sensitivity/test_sobol.py b/tests/unit_tests/sensitivity/test_sobol.py new file mode 100644 index 000000000..64882a155 --- /dev/null +++ b/tests/unit_tests/sensitivity/test_sobol.py @@ -0,0 +1,399 @@ +""" +This is the test module for Sobol sensitivity indices. + +Here, we will use the Ishigami function to test the output. + +The following methods are tested: +1. generate_pick_and_freeze_samples +2. pick_and_freeze_estimator (First and Total order Sobol indices) +3. pick_and_freeze_estimator (Second order Sobol indices) using [1]_. + +References +---------- + +.. [1] Graham Glen, Kristin Isaacs, Estimating Sobol sensitivity indices using + correlations, Environmental Modelling & Software, Volume 37, 2012, Pages 157-166, + ISSN 1364-8152, https://doi.org/10.1016/j.envsoft.2012.03.014. + + +Important +---------- +The computed indices are computed using the `np.isclose` function. + +Function signature: + numpy.isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False) + + Parameters: + a, b: array_like + Input arrays to compare. + + rtol: float + The relative tolerance parameter. + + atol: float + The absolute tolerance parameter. + +Each element of the `diff` array is compared as follows: +diff = |a - b| +diff <= atol + rtol * abs(b) + +- relative tolerance: rtol * abs(b) + It is the maximum allowed difference between a and b, + relative to the absolute value of b. + For example, to set a tolerance of 1%, pass rol=0.01, + which assures that the values are within 2 decimal places of each other. +- absolute tolerance: atol + When b is close to zero, the atol value is used. + +""" + +import ntpath +import numpy as np +import pytest +import scipy + +from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.PythonModel import PythonModel +from UQpy.distributions import Uniform +from UQpy.distributions.collection.JointIndependent import JointIndependent +from UQpy.sensitivity.sobol import Sobol + +# Prepare +############################################################################### + +# Prepare the input distribution +@pytest.fixture() +def ishigami_input_dist_object(): + """ + This function returns the input distribution for the Ishigami function. + + X1 ~ Uniform(-pi, pi) + X2 ~ Uniform(-pi, pi) + X3 ~ Uniform(-pi, pi) + + """ + return JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3) + + +@pytest.fixture() +def ishigami_model_object(): + """This function creates the Ishigami run_model_object""" + model = PythonModel( + model_script="ishigami.py", + model_object_name="evaluate", + var_names=[r"$X_1$", "$X_2$", "$X_3$"], + delete_files=True, + params=[7, 0.1], + ) + + runmodel_obj = RunModel(model=model) + + return runmodel_obj + + +@pytest.fixture() +def sobol_object(ishigami_model_object, ishigami_input_dist_object): + """This function returns the Sobol object.""" + + return Sobol(ishigami_model_object, ishigami_input_dist_object) + + +@pytest.fixture() +def analytical_ishigami_Sobol_indices(): + """ + Analytical Sobol indices for the Ishigami function. 
+ + Copy-paste the following to reproduce the given indices: + + a = 7 + b = 0.1 + + V1 = 0.5*(1 + (b*np.pi**4)/5)**2 + V2 = (a**2)/8 + V3 = 0 + + VT3 = (8*(b**2)*np.pi**8)/225 + VT1 = V1 + VT3 + VT2 = V2 + + total_variance = V2 + (b*np.pi**4)/5 + ((b**2) * np.pi**8)/18 + 0.5 + + S = np.array([V1, V2, V3])/total_variance + S_T = np.array([VT1, VT2, VT3])/total_variance + + S = np.around(S, 4) + S_T = np.around(S_T, 4) + + """ + + S1 = 0.3139 + S2 = 0.4424 + S3 = 0 + + S_T1 = 0.5576 + S_T2 = 0.4424 + S_T3 = 0.2437 + + S = np.array([S1, S2, S3]) + S_T = np.array([S_T1, S_T2, S_T3]) + + return S.reshape(-1, 1), S_T.reshape(-1, 1) + + +@pytest.fixture() +def saltelli_ishigami_Sobol_indices(sobol_object): + + SA = sobol_object + + np.random.seed(12345) #! set seed for reproducibility + + computed_indices = SA.run(n_samples=1_000_000) + + return computed_indices["sobol_i"], computed_indices["sobol_total_i"] + + +@pytest.fixture() +def NUM_SAMPLES(): + """This function returns the number of samples for bootstrapping""" + + num_bootstrap_samples = 10_000 + num_samples = 100_000 + + return num_bootstrap_samples, num_samples + + +@pytest.fixture() +def bootstrap_sobol_index_variance(sobol_object, NUM_SAMPLES): + + #### SETUP #### + SA = sobol_object + + np.random.seed(12345) #! set seed for reproducibility + + confidence_level = 0.95 + delta = -scipy.stats.norm.ppf((1 - confidence_level) / 2) + + num_bootstrap_samples, n_samples = NUM_SAMPLES + + #### Compute indices #### + computed_indices = SA.run( + n_samples=n_samples, + num_bootstrap_samples=num_bootstrap_samples, + confidence_level=confidence_level, + ) + + First_order = computed_indices["sobol_i"].ravel() + Total_order = computed_indices["sobol_total_i"].ravel() + CI_first_order = computed_indices["CI_sobol_i"] + CI_total_order = computed_indices["CI_sobol_total_i"] + + #### Compute variance #### + upper_bound_first_order = CI_first_order[:, 1] + upper_bound_total_order = CI_total_order[:, 1] + + std_bootstrap_first_order = (upper_bound_first_order - First_order) / delta + std_bootstrap_total_order = (upper_bound_total_order - Total_order) / delta + + return std_bootstrap_first_order**2, std_bootstrap_total_order**2 + + +@pytest.fixture() +def model_eval_sobol_index_variance(): + + """ + For computational efficiency, the variance of the Sobol indices + is precomputed using model evaluations with + NUM_SAMPLES (num_repetitions=10_000, num_samples=100_000) + + Copy-paste the following code to generate the variance + of the Sobol indices: + + runmodel_obj = RunModel( + model_script='ishigami.py', + var_names=['X1', 'X2', 'X3'], + vec=True, delete_files=True) + + input_obj = JointInd([Uniform(-np.pi, 2*np.pi)]*3) + + SA = Sobol(runmodel_obj, input_obj) + + np.random.seed(12345) # for reproducibility + + num_repetitions, n_samples = 10_000, 100_000 + + num_vars = 3 + + sample_first_order = np.zeros((num_vars, num_repetitions)) + sample_total_order = np.zeros((num_vars, num_repetitions)) + + for i in range(num_repetitions): + S, S_T = SA.run(n_samples=n_samples) + + sample_first_order[:, i] = S.ravel() + sample_total_order[:, i] = S_T.ravel() + + variance_first_order = np.var(sample_first_order, axis=1, ddof=1).reshape(-1, 1) + variance_total_order = np.var(sample_total_order, axis=1, ddof=1).reshape(-1, 1) + + print(variance_first_order) + print(variance_total_order) + + """ + + variance_first_order = np.array([1.98518409e-05, 1.69268227e-05, 2.50390610e-05]) + + variance_total_order = np.array([2.82995855e-05, 2.46373399e-05, 2.59811868e-05]) + + 
return variance_first_order, variance_total_order + + +@pytest.fixture() +def sobol_g_function_input_dist_object(): + """ + This function returns the input distribution object for the Sobol G function. + + X1 ~ Uniform(0, 1) + X2 ~ Uniform(0, 1) + X3 ~ Uniform(0, 1) + X4 ~ Uniform(0, 1) + X5 ~ Uniform(0, 1) + X6 ~ Uniform(0, 1) + + """ + + dist_object = JointIndependent([Uniform(0, 1)] * 6) + + return dist_object + + +@pytest.fixture() +def sobol_g_function_model_object(): + """This function creates the Sobol g-function model object""" + + a_vals = np.array([0.0, 0.5, 3.0, 9.0, 99.0, 99.0]) + + model = PythonModel( + model_script="sobol_func.py", + model_object_name="evaluate", + delete_files=True, + a_values=a_vals, + ) + + runmodel_obj = RunModel(model=model) + + return runmodel_obj + + +@pytest.fixture() +def sobol_object_g_func( + sobol_g_function_input_dist_object, sobol_g_function_model_object +): + """This function creates the Sobol object for the g-function""" + + sobol_object = Sobol( + sobol_g_function_model_object, sobol_g_function_input_dist_object + ) + + return sobol_object + + +@pytest.fixture() +def analytical_sobol_g_func_second_order_indices(): + """ + This function returns the analytical second order Sobol indices for the g-function + + The values were obtained from [1]_. + + """ + + S12 = 0.0869305 + S13 = 0.0122246 + S14 = 0.00195594 + S15 = 0.00001956 + S16 = 0.00001956 + S23 = 0.00543316 + S24 = 0.00086931 + S25 = 0.00000869 + S26 = 0.00000869 + S34 = 0.00012225 + S35 = 0.00000122 + S36 = 0.00000122 + S45 = 0.00000020 + S46 = 0.00000020 + S56 = 2.0e-9 + + S_2 = [S12, S13, S14, S15, S16, S23, S24, S25, S26, S34, S35, S36, S45, S46, S56] + + return np.array(S_2).reshape(-1, 1) + + +@pytest.fixture() +def saltelli_sobol_g_function(sobol_object_g_func): + + SA = sobol_object_g_func + + np.random.seed(12345) #! set seed for reproducibility + + # Compute Sobol indices using the pick and freeze algorithm + # Save only second order indices + computed_indices = SA.run(n_samples=100_000, estimate_second_order=True) + + return computed_indices["sobol_ij"] + + +# Unit tests +############################################################################### + + +def test_pick_and_freeze_estimator( + analytical_ishigami_Sobol_indices, saltelli_ishigami_Sobol_indices +): + + """ + Test the Saltelli pick and freeze estimator using 1_000_000 samples. 
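+
+    The computed indices are compared with the analytical values to two
+    decimal places (np.isclose with rtol=0, atol=1e-2).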
+
+    """
+
+    # Prepare
+    S_analytical, S_T_analytical = analytical_ishigami_Sobol_indices
+    S_saltelli, S_T_saltelli = saltelli_ishigami_Sobol_indices
+
+    # Assert
+    assert S_analytical.shape == S_saltelli.shape
+    assert S_T_analytical.shape == S_T_saltelli.shape
+    # Idea: Measure accuracy up to 2 decimal places -> rtol=0, atol=1e-2
+    assert np.isclose(S_saltelli, S_analytical, rtol=0, atol=1e-2).all()
+    assert np.isclose(S_T_saltelli, S_T_analytical, rtol=0, atol=1e-2).all()
+
+
+def test_bootstrap_variance_computation(
+    model_eval_sobol_index_variance, bootstrap_sobol_index_variance
+):
+
+    """Test the bootstrap variance computation."""
+
+    # Prepare
+    var_first, var_total = model_eval_sobol_index_variance
+    boot_var_first, boot_var_total = bootstrap_sobol_index_variance
+
+    # Assert
+    assert var_first.shape == boot_var_first.shape
+    assert var_total.shape == boot_var_total.shape
+
+    # Idea: Ensure bootstrap variance and MC variance are of the same order -> rtol=0, atol=1e-4
+    assert np.isclose(boot_var_first, var_first, rtol=0, atol=1e-4).all()
+    assert np.isclose(boot_var_total, var_total, rtol=0, atol=1e-4).all()
+
+
+def test_second_order_indices(
+    analytical_sobol_g_func_second_order_indices, saltelli_sobol_g_function
+):
+
+    """Test the second order indices computation."""
+
+    # Prepare
+    S_2_analytical = analytical_sobol_g_func_second_order_indices
+    S_2 = saltelli_sobol_g_function
+
+    # Assert
+    # Idea: Measure accuracy up to 2 decimal places -> rtol=0, atol=1e-2
+    assert np.isclose(S_2, S_2_analytical, rtol=0, atol=1e-2).all()

From d2da7d124eb25e04e6aaf69f7fb3c5a182f64f9a Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sat, 7 May 2022 21:56:06 +0200
Subject: [PATCH 03/88] Fixed minor typo

---
 docs/code/sensitivity/morris/README.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/code/sensitivity/morris/README.rst b/docs/code/sensitivity/morris/README.rst
index cb065129a..2d37d6599 100644
--- a/docs/code/sensitivity/morris/README.rst
+++ b/docs/code/sensitivity/morris/README.rst
@@ -3,7 +3,7 @@ Morris Screening
 
 Consider a model of the sort :math:`Y=h(X)`, :math:`Y` is assumed to be scalar, :math:`X=[X_{1}, ..., X_{d}]`.
 
-For each input ;math:`X_{k}`, the elementary effect is computed as:
+For each input :math:`X_{k}`, the elementary effect is computed as:
 
 .. math:: EE_{k} = \frac{Y(X_{1}, ..., X_{k}+\Delta, ..., X_{d})-Y(X_{1}, ..., X_{k}, ..., X_{d})}{\Delta}
 
From 2ae7cebbbd197323f05ec2dcbf34d684d5a8448c Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 14:20:25 +0200
Subject: [PATCH 04/88] Added modules in sensitivity __init__.py

---
 src/UQpy/sensitivity/__init__.py           | 3 +++
 src/UQpy/sensitivity/baseclass/__init__.py | 2 ++
 2 files changed, 5 insertions(+)

diff --git a/src/UQpy/sensitivity/__init__.py b/src/UQpy/sensitivity/__init__.py
index f7b7f2bdd..5a5b0d997 100644
--- a/src/UQpy/sensitivity/__init__.py
+++ b/src/UQpy/sensitivity/__init__.py
@@ -1,4 +1,7 @@
 from UQpy.sensitivity.MorrisSensitivity import MorrisSensitivity
 from UQpy.sensitivity.PceSensitivity import PceSensitivity
+from UQpy.sensitivity.sobol import Sobol
 
 from . import MorrisSensitivity
+from . import PceSensitivity
+from .
import Sobol diff --git a/src/UQpy/sensitivity/baseclass/__init__.py b/src/UQpy/sensitivity/baseclass/__init__.py index e69de29bb..7e11a2b63 100644 --- a/src/UQpy/sensitivity/baseclass/__init__.py +++ b/src/UQpy/sensitivity/baseclass/__init__.py @@ -0,0 +1,2 @@ +from UQpy.sensitivity.baseclass.sensitivity import * +from UQpy.sensitivity.baseclass.pickfreeze import * From 23c0607cf9e4e7a625d893e872177dd7e315c595 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Sun, 8 May 2022 14:21:05 +0200 Subject: [PATCH 05/88] Formatted conf.py with Black --- docs/source/conf.py | 175 +++++++++++++++++++++++--------------------- 1 file changed, 93 insertions(+), 82 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 77b7351f1..318e66625 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -51,14 +51,14 @@ # nbsphinx_custom_formats={ # ".md": ["jupytext.reads", {"fmt": "mystnb"}] # } -autoclass_content = 'init' +autoclass_content = "init" add_module_names = False -autodoc_member_order = 'bysource' +autodoc_member_order = "bysource" napoleon_use_param = True # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] -bibtex_bibfiles = ['bibliography.bib'] -bibtex_default_style = 'unsrt' +bibtex_bibfiles = ["bibliography.bib"] +bibtex_default_style = "unsrt" # Try to remove duplicate labels autosectionlabel_prefix_document = True @@ -69,83 +69,89 @@ exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "Model_Runs**"] sphinx_gallery_conf = { - 'examples_dirs': ['../code/dimension_reduction/diffusion_maps', - '../code/dimension_reduction/pod', - '../code/dimension_reduction/grassmann', - '../code/distributions/continuous_1d', - '../code/distributions/discrete_1d', - '../code/distributions/multivariate', - '../code/distributions/user_defined', - '../code/sampling/adaptive_kriging', - '../code/sampling/importance_sampling', - '../code/sampling/monte_carlo', - '../code/sampling/latin_hypercube', - '../code/sampling/mcmc', - '../code/sampling/simplex', - '../code/sampling/true_stratified_sampling', - '../code/sampling/refined_stratified_sampling', - '../code/inference/mle', - '../code/inference/info_model_selection', - '../code/inference/bayes_parameter_estimation', - '../code/inference/bayes_model_selection', - '../code/transformations/nataf', - '../code/sensitivity/morris', - '../code/stochastic_processes/bispectral', - '../code/stochastic_processes/karhunen_loeve', - '../code/stochastic_processes/spectral', - '../code/stochastic_processes/translation', - '../code/reliability/form', - '../code/reliability/sorm', - '../code/reliability/subset_simulation', - '../code/surrogates/srom', - '../code/surrogates/gpr', - '../code/surrogates/pce', - '../code/RunModel',], # path to your example scripts, - 'gallery_dirs': ['auto_examples/dimension_reduction/diffusion_maps', - 'auto_examples/dimension_reduction/pod', - 'auto_examples/dimension_reduction/grassmann', - 'auto_examples/distributions/continuous_1d', - 'auto_examples/distributions/discrete_1d', - 'auto_examples/distributions/multivariate', - 'auto_examples/distributions/user_defined', - 'auto_examples/sampling/adaptive_kriging', - 'auto_examples/sampling/importance_sampling', - 'auto_examples/sampling/monte_carlo', - 'auto_examples/sampling/latin_hypercube', - 'auto_examples/sampling/mcmc', - 'auto_examples/sampling/simplex', - 'auto_examples/sampling/true_stratified_sampling', - 'auto_examples/sampling/refined_stratified_sampling', - 'auto_examples/inference/mle', - 
'auto_examples/inference/info_model_selection', - 'auto_examples/inference/bayes_parameter_estimation', - 'auto_examples/inference/bayes_model_selection', - 'auto_examples/transformations/nataf', - 'auto_examples/sensitivity/morris', - 'auto_examples/stochastic_processes/bispectral', - 'auto_examples/stochastic_processes/karhunen_loeve', - 'auto_examples/stochastic_processes/spectral', - 'auto_examples/stochastic_processes/translation', - 'auto_examples/reliability/form', - 'auto_examples/reliability/sorm', - 'auto_examples/reliability/subset_simulation', - 'auto_examples/surrogates/srom', - 'auto_examples/surrogates/gpr', - 'auto_examples/surrogates/pce', - 'auto_examples/RunModel',], # path to where to save gallery generated output - 'binder': { + "examples_dirs": [ + "../code/dimension_reduction/diffusion_maps", + "../code/dimension_reduction/pod", + "../code/dimension_reduction/grassmann", + "../code/distributions/continuous_1d", + "../code/distributions/discrete_1d", + "../code/distributions/multivariate", + "../code/distributions/user_defined", + "../code/sampling/adaptive_kriging", + "../code/sampling/importance_sampling", + "../code/sampling/monte_carlo", + "../code/sampling/latin_hypercube", + "../code/sampling/mcmc", + "../code/sampling/simplex", + "../code/sampling/true_stratified_sampling", + "../code/sampling/refined_stratified_sampling", + "../code/inference/mle", + "../code/inference/info_model_selection", + "../code/inference/bayes_parameter_estimation", + "../code/inference/bayes_model_selection", + "../code/transformations/nataf", + "../code/sensitivity/morris", + "../code/sensitivity/sobol", + "../code/stochastic_processes/bispectral", + "../code/stochastic_processes/karhunen_loeve", + "../code/stochastic_processes/spectral", + "../code/stochastic_processes/translation", + "../code/reliability/form", + "../code/reliability/sorm", + "../code/reliability/subset_simulation", + "../code/surrogates/srom", + "../code/surrogates/gpr", + "../code/surrogates/pce", + "../code/RunModel", + ], # path to your example scripts, + "gallery_dirs": [ + "auto_examples/dimension_reduction/diffusion_maps", + "auto_examples/dimension_reduction/pod", + "auto_examples/dimension_reduction/grassmann", + "auto_examples/distributions/continuous_1d", + "auto_examples/distributions/discrete_1d", + "auto_examples/distributions/multivariate", + "auto_examples/distributions/user_defined", + "auto_examples/sampling/adaptive_kriging", + "auto_examples/sampling/importance_sampling", + "auto_examples/sampling/monte_carlo", + "auto_examples/sampling/latin_hypercube", + "auto_examples/sampling/mcmc", + "auto_examples/sampling/simplex", + "auto_examples/sampling/true_stratified_sampling", + "auto_examples/sampling/refined_stratified_sampling", + "auto_examples/inference/mle", + "auto_examples/inference/info_model_selection", + "auto_examples/inference/bayes_parameter_estimation", + "auto_examples/inference/bayes_model_selection", + "auto_examples/transformations/nataf", + "auto_examples/sensitivity/morris", + "auto_examples/sensitivity/sobol", + "auto_examples/stochastic_processes/bispectral", + "auto_examples/stochastic_processes/karhunen_loeve", + "auto_examples/stochastic_processes/spectral", + "auto_examples/stochastic_processes/translation", + "auto_examples/reliability/form", + "auto_examples/reliability/sorm", + "auto_examples/reliability/subset_simulation", + "auto_examples/surrogates/srom", + "auto_examples/surrogates/gpr", + "auto_examples/surrogates/pce", + "auto_examples/RunModel", + ], # path to 
where to save gallery generated output + "binder": { # Required keys - 'org': 'SURGroup', - 'repo': 'UQpy', - 'branch': 'master', # Can be any branch, tag, or commit hash. Use a branch that hosts your docs. - 'binderhub_url': 'https://mybinder.org', + "org": "SURGroup", + "repo": "UQpy", + "branch": "master", # Can be any branch, tag, or commit hash. Use a branch that hosts your docs. + "binderhub_url": "https://mybinder.org", # Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org). - 'dependencies': './binder/requirements.txt', - 'notebooks_dir': 'notebooks', - 'use_jupyter_lab': True + "dependencies": "./binder/requirements.txt", + "notebooks_dir": "notebooks", + "use_jupyter_lab": True # Jupyter notebooks for Binder will be copied to this directory (relative to built documentation root). }, - 'ignore_pattern': '/local_', + "ignore_pattern": "/local_", } # -- Options for HTML output ------------------------------------------------- @@ -159,9 +165,9 @@ html_theme = "sphinx_rtd_theme" html_theme_options = { - 'logo_only': True, - 'style_nav_header_background': '#F0F0F0', - 'vcs_pageview_mode': 'view' + "logo_only": True, + "style_nav_header_background": "#F0F0F0", + "vcs_pageview_mode": "view", } github_url = "https://github.com/SURGroup/UQpy" @@ -173,13 +179,18 @@ html_static_path = ["_static"] html_sidebars = { - "**": ["about.html", "navigation.html", "relations.html", "searchbox.html", ] + "**": [ + "about.html", + "navigation.html", + "relations.html", + "searchbox.html", + ] } # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # -source_suffix = ['.rst', '.md'] +source_suffix = [".rst", ".md"] # source_suffix = ".rst" # The master toctree document. From 5de396e2efbeffd97c0f000be6e126bca16fc6c7 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Sun, 8 May 2022 14:22:13 +0200 Subject: [PATCH 06/88] Added documentation for Sobol indices --- docs/code/sensitivity/sobol/README.rst | 24 ++++++++ docs/source/sensitivity/index.rst | 5 +- docs/source/sensitivity/sobol.rst | 79 ++++++++++++++++++++++++++ 3 files changed, 106 insertions(+), 2 deletions(-) create mode 100644 docs/code/sensitivity/sobol/README.rst create mode 100644 docs/source/sensitivity/sobol.rst diff --git a/docs/code/sensitivity/sobol/README.rst b/docs/code/sensitivity/sobol/README.rst new file mode 100644 index 000000000..1be801c21 --- /dev/null +++ b/docs/code/sensitivity/sobol/README.rst @@ -0,0 +1,24 @@ +Sobol Sensitivity indices +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +These examples serve as a guide for using the Sobol sensitivity module. They have been taken from various papers to enable validation of the implementation and have been referenced accordingly. + +Single output models +====================== +We demonstrate the computation of the Sobol indices for models with a single output using the following examples: + +1. Ishigami function +2. Exponential function +3. Sobol function with parameters a := [0., 0.5, 3., 9., 99., 99.] : Example from [2] page 11 + +Multiple output models +======================== + +We demonstrate the computation of the Sobol indices for models with multiple outputs using the following example: + +1. Mechanical oscillator ODE (numerical model): Example from [1] page 19 + + +[1] Gamboa F, Janon A, Klein T, Lagnoux A, others. Sensitivity analysis for multidimensional and functional outputs. Electronic journal of statistics 2014; 8(1): 575-603. + +[2] Saltelli, A. (2002). 
Making best use of model evaluations to compute sensitivity indices. Computer Physics Communications 2002; 145(2): 280-297.
diff --git a/docs/source/sensitivity/index.rst b/docs/source/sensitivity/index.rst
index 8fbf6b391..0e5fef5c0 100644
--- a/docs/source/sensitivity/index.rst
+++ b/docs/source/sensitivity/index.rst
@@ -5,10 +5,10 @@ This module contains functionality for all the sampling methods supported in :py
 
 The module currently contains the following classes:
 
+- :py:class:`.Sobol`: Class to compute Sobol sensitivity indices.
 - :py:class:`.MorrisSensitivity`: Class to perform Morris.
 - :py:class:`.PceSensitivity`: Class to compute the sensitivity indices using the :class:`.PolynomialChaosExpansion` method.
 
-
 Sensitivity analysis comprises techniques focused on determining how the variations of input variables :math:`X=\left[ X_{1}, X_{2},…,X_{d} \right]` of a mathematical model influence the response value :math:`Y=h(X)`.
 
@@ -18,4 +18,5 @@
    :caption: Sensitivity
 
    Morris Sensitivity
-   Polynomial Chaos Sensitivity
\ No newline at end of file
+   Polynomial Chaos Sensitivity
+   Sobol Sensitivity
diff --git a/docs/source/sensitivity/sobol.rst b/docs/source/sensitivity/sobol.rst
new file mode 100644
index 000000000..1966330c1
--- /dev/null
+++ b/docs/source/sensitivity/sobol.rst
@@ -0,0 +1,79 @@
+
+Sobol indices
+----------------------------------------
+
+Sobol indices are the standard approach for global, variance-based sensitivity analysis.
+The indices are based on a variance decomposition of the model output. This decomposition allows us to apportion the variance of the model output to the individual uncertain inputs.
+
+There are three main groups of indices:
+
+- First order indices (:math:`S_{i}`): Describe the fraction of the output variance due to a single uncertain input parameter. This amount of variance can be reduced if the uncertainty in the corresponding input is eliminated.
+
+- Higher order indices: Describe the fraction of the output variance due to interactions between uncertain input parameters. For example, the second order indices (:math:`S_{ij}`) describe the fraction of the output variance due to interactions between two uncertain input parameters :math:`i` and :math:`j`.
+
+- Total order indices (:math:`S_{T_{i}}`): Describe the fraction of the output variance due to a single input parameter and all higher order effects in which the input parameter is involved.
+
+If the first order index of an input parameter is equal to its total order index, the parameter is not involved in any interaction effects.
+
+The Sobol indices are computed using the Pick-and-Freeze approach for single output and multi-output models. Since there are several variants of the Pick-and-Freeze approach, the schemes implemented to compute Sobol indices are listed below
+(where :math:`N` is the number of Monte Carlo samples and :math:`m` is the number of input parameters in the model).
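+
+For intuition, the following minimal NumPy sketch shows how one such pick-and-freeze
+estimate is assembled from the two sets of model evaluations. It mirrors the
+"Sobol1993" first order scheme listed below; the helper name and variables are
+illustrative only and not part of the UQpy API::
+
+    import numpy as np
+
+    def first_order_pick_freeze(y_A, y_Ci):
+        """First order index from model evaluations y_A = f(A), y_Ci = f(C_i)."""
+        N = y_A.shape[0]
+        f_0_sq = y_A.mean() ** 2                # f_0^2 = ((1/N) sum y_A)^2
+        numerator = y_A @ y_Ci / N - f_0_sq     # estimates V[E(Y | X_i)]
+        denominator = y_A @ y_A / N - f_0_sq    # estimates V(Y)
+        return numerator / denominator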
+1. **First order indices** (:math:`S_{i}`)
+
+- Janon2014: Requires :math:`N(m + 1)` model evaluations
+
+.. math::
+   \frac{\mathbb{V}\left[E\left(Y \mid X_{i}\right)\right]}{\mathbb{V}(Y)} = \frac{\operatorname{Cov}\left(Y, Y_{C_{i}}\right)}{\mathbb{V}(Y)} = \frac{ (1 / N) Y_{A} \cdot Y_{C_{i}}-f_{0}^{2}}{ (1 / N)\frac{Y_{A} \cdot Y_{A} + Y_{C_{i}} \cdot Y_{C_{i}}}{2}-f_{0}^{2}}

+.. math::
+   y_{A}=f(A), \quad y_{C_{i}}=f(C_{i}), \quad f_{0}^{2}=\left(\frac{1}{2N} \sum_{j=1}^{N} \left( y_{A}^{(j)} + y_{C_{i}}^{(j)} \right) \right)^{2}
+
+Compared to "Sobol1993", the "Janon2014" estimator makes more efficient use of model evaluations and produces smaller confidence intervals.
+
+- Sobol1993: Requires :math:`N(m + 1)` model evaluations [2]_.
+
+.. math::
+   S_{i} = \frac{\mathbb{V}\left[E\left(Y \mid X_{i}\right)\right]}{\mathbb{V}(Y)} = \frac{ (1/N) Y_{A} \cdot Y_{C_{i}}-f_{0}^{2}}{(1 / N) Y_{A} \cdot Y_{A}-f_{0}^{2}}
+
+.. math::
+   y_{A}=f(A), \quad y_{C_{i}}=f(C_{i}), \quad f_{0}^{2}=\left(\frac{1}{N} \sum_{j=1}^{N} y_{A}^{(j)} \right)^{2}
+
+- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [4]_.
+
+2. **Second order indices** (:math:`S_{ij}`)
+
+- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [4]_.
+
+3. **Total order indices** (:math:`S_{T_{i}}`)
+
+- Homma1996: Requires :math:`N(m + 1)` model evaluations [2]_.
+
+.. math::
+   S_{T_{i}} = 1 - \frac{\mathbb{V}\left[E\left(Y \mid \mathbf{X}_{\sim_{i}}\right)\right]}{\mathbb{V}(Y)} = 1 - \frac{ (1 / N) Y_{B} \cdot Y_{C_{i}}-f_{0}^{2}}{(1 / N) Y_{A} \cdot Y_{A}-f_{0}^{2}}
+
+.. math::
+   y_{A}=f(A), \quad y_{B}=f(B), \quad y_{C_{i}}=f(C_{i}), \quad f_{0}^{2}=\left(\frac{1}{2N} \sum_{j=1}^{N} \left( y_{A}^{(j)} + y_{B}^{(j)} \right) \right)^{2}
+
+- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [4]_.
+
+
+Sobol Class
+^^^^^^^^^^^^^^^^^^
+
+The :class:`Sobol` class is imported using the following command:
+
+>>> from UQpy.sensitivity.sobol import Sobol
+
+Methods
+"""""""
+
+.. autoclass:: UQpy.sensitivity.Sobol
+    :members: run
+
+Examples
+""""""""""
+
+.. toctree::
+
+   Sobol Examples <../auto_examples/sensitivity/sobol/index>
From c38b2e6f20013054375cf02568dc7f11446b3c42 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 14:22:39 +0200
Subject: [PATCH 07/88] Added examples for Sobol indices

---
 .../sensitivity/sobol/local_exponential.py    |  20 +++
 docs/code/sensitivity/sobol/local_ishigami.py |  23 ++++
 .../sobol/local_mechanical_oscillator_ODE.py  |  60 +++++++++
 .../sensitivity/sobol/local_sobol_func.py     |  42 ++++++
 .../sobol/plot_mechanical_oscillator_ODE.py   |  92 +++++++++++++
 .../sobol/plot_sobol_exponential.py           |  60 +++++++++
 .../code/sensitivity/sobol/plot_sobol_func.py | 124 ++++++++++++++++++
 .../sensitivity/sobol/plot_sobol_ishigami.py  | 102 ++++++++++++++
 8 files changed, 523 insertions(+)
 create mode 100644 docs/code/sensitivity/sobol/local_exponential.py
 create mode 100644 docs/code/sensitivity/sobol/local_ishigami.py
 create mode 100644 docs/code/sensitivity/sobol/local_mechanical_oscillator_ODE.py
 create mode 100644 docs/code/sensitivity/sobol/local_sobol_func.py
 create mode 100644 docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py
 create mode 100644 docs/code/sensitivity/sobol/plot_sobol_exponential.py
 create mode 100644 docs/code/sensitivity/sobol/plot_sobol_func.py
 create mode 100644 docs/code/sensitivity/sobol/plot_sobol_ishigami.py

diff --git a/docs/code/sensitivity/sobol/local_exponential.py b/docs/code/sensitivity/sobol/local_exponential.py
new file mode 100644
index 000000000..1fd0ef0d9
--- /dev/null
+++ b/docs/code/sensitivity/sobol/local_exponential.py
@@ -0,0 +1,20 @@
+"""

+Auxiliary file
+==============================================

+"""

+import numpy as np


+def evaluate(X: np.array) -> np.array:
+    r"""A non-linear function that is used to demonstrate sensitivity index.
+
+    .. math::
+        f(x) = \exp(x_1 + 2 x_2)
+    """
+
+    Y = np.exp(X[:, 0] + 2 * X[:, 1])
+
+    return Y
diff --git a/docs/code/sensitivity/sobol/local_ishigami.py b/docs/code/sensitivity/sobol/local_ishigami.py
new file mode 100644
index 000000000..e5af649fe
--- /dev/null
+++ b/docs/code/sensitivity/sobol/local_ishigami.py
@@ -0,0 +1,23 @@
+"""

+Auxiliary file
+==============================================

+"""

+import numpy as np


+def evaluate(X, params=[7, 0.1]):
+    """Non-monotonic Ishigami-Homma three parameter test function"""
+
+    a = params[0]
+    b = params[1]
+
+    Y = (
+        np.sin(X[:, 0])
+        + a * np.power(np.sin(X[:, 1]), 2)
+        + b * np.power(X[:, 2], 4) * np.sin(X[:, 0])
+    )
+
+    return Y
diff --git a/docs/code/sensitivity/sobol/local_mechanical_oscillator_ODE.py b/docs/code/sensitivity/sobol/local_mechanical_oscillator_ODE.py
new file mode 100644
index 000000000..13b28c9fa
--- /dev/null
+++ b/docs/code/sensitivity/sobol/local_mechanical_oscillator_ODE.py
@@ -0,0 +1,60 @@
+"""

+Auxiliary file
+==============================================

+"""

+import numpy as np
+from scipy.integrate import solve_ivp


+def mech_oscillator(input_parameters):
+    r"""
+    We have the second order differential equation:
+
+    .. math::
+
+        m \ddot{x} + c \dot{x} + k x = 0
+
+    with initial conditions :math:`x(0) = \ell`, :math:`\dot{x}(0) = 0`,
+
+    where, for example, :math:`m \sim \mathcal{U}(10, 12)`,
+    :math:`c \sim \mathcal{U}(0.4, 0.8)`,
+    :math:`k \sim \mathcal{U}(70, 90)`,
+    :math:`\ell \sim \mathcal{U}(-1, -0.25)`.
+
+
+    References
+    ----------
+
+    .. [1] Gamboa F, Janon A, Klein T, Lagnoux A, et al.
+        Sensitivity analysis for multidimensional and functional outputs.
+        Electronic Journal of Statistics 2014; 8(1): 575-603.
+
+    """
+
+    # unpack the input parameters
+    m, c, k, l = input_parameters[0]
+
+    # initial conditions
+    x_0 = l
+    v_0 = 0
+
+    # time points
+    t_0 = 0
+    t_f = 40
+    dt = 0.05
+    n_t = int((t_f - t_0) / dt)
+    T = np.linspace(t_0, t_f, n_t)
+
+    def ODE(t, y):
+        """
+        The ODE system.
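+        State vector: ``y[0]`` is the displacement ``x`` and ``y[1]`` the velocity ``v``;
+        the function returns ``[dx/dt, dv/dt] = [v, -(k/m) x - (c/m) v]``.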
+
+        """
+        return np.array([y[1], -(k / m) * y[0] - (c / m) * y[1]])
+
+    # solve the ODE
+    sol = solve_ivp(ODE, [t_0, t_f], [x_0, v_0], method="RK45", t_eval=T)
+
+    return sol.y[0]
diff --git a/docs/code/sensitivity/sobol/local_sobol_func.py b/docs/code/sensitivity/sobol/local_sobol_func.py
new file mode 100644
index 000000000..1ccabc6dd
--- /dev/null
+++ b/docs/code/sensitivity/sobol/local_sobol_func.py
@@ -0,0 +1,42 @@
+"""

+Auxiliary file
+==============================================

+"""

+import numpy as np
+import copy


+def evaluate(X, a_values):
+    """Evaluate the Sobol g-function for the given coefficients a_values."""
+
+    dims = len(a_values)
+    g = 1
+
+    for i in range(dims):
+        g_i = (np.abs(4 * X[:, i] - 2) + a_values[i]) / (1 + a_values[i])
+        g *= g_i
+
+    return g


+def sensitivities(a_values):
+    """Analytical first and total order Sobol indices of the g-function."""
+
+    dims = len(a_values)
+
+    Total_order = np.zeros((dims, 1))
+
+    V_i = (3 * (1 + a_values) ** 2) ** (-1)
+
+    total_variance = np.prod(1 + V_i) - 1
+
+    First_order = V_i / total_variance
+
+    for i in range(dims):
+
+        rem_First_order = copy.deepcopy(V_i)
+        rem_First_order[i] = 0
+        Total_order[i] = V_i[i] * np.prod(rem_First_order + 1) / total_variance
+
+    return First_order.reshape(-1, 1), Total_order
diff --git a/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py b/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py
new file mode 100644
index 000000000..6e03332d0
--- /dev/null
+++ b/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py
@@ -0,0 +1,92 @@
+r"""

+Mechanical oscillator model (multioutput)
+==============================================

+The mechanical oscillator is governed by the following second-order ODE:

+.. math::
+    m \ddot{x} + c \dot{x} + k x = 0

+.. math::
+    x(0) = \ell, \dot{x}(0) = 0.

+The parameters of the oscillator are modeled as follows:

+.. math::
+    m \sim \mathcal{U}(10, 12), c \sim \mathcal{U}(0.4, 0.8), k \sim \mathcal{U}(70, 90), \ell \sim \mathcal{U}(-1, -0.25).
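+
+The model output is the full displacement time series :math:`x(t)` on :math:`t \in [0, 40]`,
+so the first and total order Sobol indices below are computed pointwise in time.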
+ +""" + +# %% +import numpy as np +import matplotlib.pyplot as plt + +from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.PythonModel import PythonModel +from UQpy.distributions import Uniform +from UQpy.distributions.collection.JointIndependent import JointIndependent +from UQpy.sensitivity.sobol import Sobol + +# %% +# Create Model object +model = PythonModel( + model_script="local_mechanical_oscillator_ODE.py", + model_object_name="mech_oscillator", + var_names=[r"$m$", "$c$", "$k$", "$\ell$"], + delete_files=True, +) + +runmodel_obj = RunModel(model=model) + +# Define distribution object +M = Uniform(10, (12 - 10)) +C = Uniform(0.4, (0.8 - 0.4)) +K = Uniform(70, (90 - 70)) +L = Uniform(-1, (-0.25 - -1)) +dist_object = JointIndependent([M, C, K, L]) + +# %% +SA = Sobol(runmodel_obj, dist_object) + +computed_indices = SA.run(n_samples=500) + +# %% +# Plot the Sobol indices +t_0 = 0 +t_f = 40 +dt = 0.05 +n_t = int((t_f - t_0) / dt) +T = np.linspace(t_0, t_f, n_t) + +fig, ax = plt.subplots(1, 2, figsize=(16, 8)) + +ax[0].plot(T, computed_indices["sobol_total_i"][0, :], "r", label=r"$m$") +ax[0].plot(T, computed_indices["sobol_total_i"][1, :], "g", label=r"$c$") +ax[0].plot(T, computed_indices["sobol_total_i"][2, :], label=r"$k$", color="royalblue") +ax[0].plot( + T, computed_indices["sobol_total_i"][3, :], label=r"$\ell$", color="aquamarine" +) + +ax[0].set_title("Total order Sobol indices", fontsize=16) +ax[0].set_xlabel("time (s)", fontsize=16) +ax[0].set_ylabel(r"$S_{T_i}$", fontsize=16) +ax[0].set_xbound(0, t_f) +ax[0].set_ybound(-0.2, 1.2) +ax[0].legend() + +ax[1].plot(T, computed_indices["sobol_i"][0, :], "r", label=r"$m$") +ax[1].plot(T, computed_indices["sobol_i"][1, :], "g", label=r"$c$") +ax[1].plot(T, computed_indices["sobol_i"][2, :], label=r"$k$", color="royalblue") +ax[1].plot(T, computed_indices["sobol_i"][3, :], label=r"$\ell$", color="aquamarine") + +ax[1].set_title("First order Sobol indices", fontsize=16) +ax[1].set_xlabel("time (s)", fontsize=16) +ax[1].set_ylabel(r"$S_i$", fontsize=16) +ax[1].set_xbound(0, t_f) +ax[1].set_ybound(-0.2, 1.2) +ax[1].legend(fontsize=12) + +fig.suptitle("Pointwise-in-time Sobol indices", fontsize=20) + +plt.show() diff --git a/docs/code/sensitivity/sobol/plot_sobol_exponential.py b/docs/code/sensitivity/sobol/plot_sobol_exponential.py new file mode 100644 index 000000000..81c9b78e9 --- /dev/null +++ b/docs/code/sensitivity/sobol/plot_sobol_exponential.py @@ -0,0 +1,60 @@ +""" + +Exponential function +============================================== + +.. 
math:: + f(x) := \exp(x_1 + 2x_2), \quad x_1, x_2 \sim \mathcal{N}(0, 1) + +""" + +# %% +from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.PythonModel import PythonModel +from UQpy.distributions import Normal +from UQpy.distributions.collection.JointIndependent import JointIndependent +from UQpy.sensitivity.sobol import Sobol + +# %% +# Create Model object +model = PythonModel( + model_script="local_exponential.py", + model_object_name="evaluate", + var_names=[ + "X_1", + "X_2", + ], + delete_files=True, +) + +runmodel_obj = RunModel(model=model) + +# Define distribution object +dist_object = JointIndependent([Normal(0, 1)] * 2) + +# %% [markdown] +# Compute Sobol indices + +# %% +SA = Sobol(runmodel_obj, dist_object) + +# Compute Sobol indices using the pick and freeze algorithm +computed_indices = SA.run( + n_samples=100_000, num_bootstrap_samples=1_000, confidence_level=0.95 +) + +# %% [markdown] +# Expected first order Sobol indices (computed analytically): +# +# X1: 0.0118 +# +# X2: 0.3738 + +# %% +computed_indices["sobol_i"] + +# %% [markdown] +# Confidence intervals for first order Sobol indices + +# %% +computed_indices["CI_sobol_i"] diff --git a/docs/code/sensitivity/sobol/plot_sobol_func.py b/docs/code/sensitivity/sobol/plot_sobol_func.py new file mode 100644 index 000000000..0f7f7ed0d --- /dev/null +++ b/docs/code/sensitivity/sobol/plot_sobol_func.py @@ -0,0 +1,124 @@ +r""" + +Sobol function +============================================== + +.. math:: + + g(x_1, x_2, \ldots, x_D) := \prod_{i=1}^{D} \frac{|4x_i - 2| + a_i}{1 + a_i}, + +where, + +.. math:: + x_i \sim \mathcal{U}(0, 1), \quad a_i \in \mathbb{R}. + +""" + +# %% +import numpy as np + +from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.PythonModel import PythonModel +from UQpy.distributions import Uniform +from UQpy.distributions.collection.JointIndependent import JointIndependent +from UQpy.sensitivity.sobol import Sobol + +# %% +# Create Model object +num_vars = 6 +a_vals = np.array([0.0, 0.5, 3.0, 9.0, 99.0, 99.0]) + +model = PythonModel( + model_script="local_sobol_func.py", + model_object_name="evaluate", + var_names=["X_" + str(i) for i in range(num_vars)], + delete_files=True, + a_values=a_vals, +) + +runmodel_obj = RunModel(model=model) + +# Define distribution object +dist_object = JointIndependent([Uniform(0, 1)] * num_vars) + +# %% [markdown] +# #### Compute Sobol indices + +# %% +SA = Sobol(runmodel_obj, dist_object) + +# Compute Sobol indices using the pick and freeze algorithm +computed_indices = SA.run(n_samples=50_000, estimate_second_order=True) + +# %% [markdown] +# First order Sobol indices +# +# $S_1$ = 5.86781190e-01 +# +# $S_2$ = 2.60791640e-01 +# +# $S_3$ = 3.66738244e-02 +# +# $S_4$ = 5.86781190e-03 +# +# $S_5$ = 5.86781190e-05 +# +# $S_6$ = 5.86781190e-05 + +# %% +computed_indices["sobol_i"] + +# %% [markdown] +# Total order Sobol indices +# +# $S_{T_1}$ = 6.90085892e-01 +# +# $S_{T_2}$ = 3.56173364e-01 +# +# $S_{T_3}$ = 5.63335422e-02 +# +# $S_{T_4}$ = 9.17057664e-03 +# +# $S_{T_5}$ = 9.20083854e-05 +# +# $S_{T_6}$ = 9.20083854e-05 +# + +# %% +computed_indices["sobol_total_i"] + +# %% [markdown] +# Second-order Sobol indices +# +# $S_{12}$ = 0.0869305 +# +# $S_{13}$ = 0.0122246 +# +# $S_{14}$ = 0.00195594 +# +# $S_{15}$ = 0.00001956 +# +# $S_{16}$ = 0.00001956 +# +# $S_{23}$ = 0.00543316 +# +# $S_{24}$ = 0.00086931 +# +# $S_{25}$ = 0.00000869 +# +# $S_{26}$ = 0.00000869 +# +# $S_{34}$ = 0.00012225 +# +# $S_{35}$ = 0.00000122 +# +# 
$S_{36}$ = 0.00000122 +# +# $S_{45}$ = 0.00000020 +# +# $S_{46}$ = 0.00000020 +# +# $S_{56}$ = 2.0e-9 + +# %% +computed_indices["sobol_ij"] diff --git a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py new file mode 100644 index 000000000..a448a61b4 --- /dev/null +++ b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py @@ -0,0 +1,102 @@ +r""" + +Ishigami function +============================================== + +.. math:: + f(x_1, x_2, x_3) = sin(x_1) + a \cdot sin^2(x_2) + b \cdot x_3^4 sin(x_1) + +.. math:: + x_1, x_2, x_3 \sim \mathcal{U}(-\pi, \pi), \quad a, b\in \mathbb{R} + +First order Sobol indices +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. math:: + S_1 = \frac{V_1}{\mathbb{V}[Y]}, \quad S_2 = \frac{V_2}{\mathbb{V}[Y]}, \quad S_3 = \frac{V_3}{\mathbb{V}[Y]} = 0, + +.. math:: + V_1 = 0.5 (1 + \frac{b\pi^4}{5})^2, \quad V_2 = \frac{a^2}{8}, \quad V_3 = 0 + +.. math:: + \mathbb{V}[Y] = \frac{a^2}{8} + \frac{b\pi^4}{5} + \frac{b^2\pi^8}{18} + \frac{1}{2} + +Total order Sobol indices +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. math:: + S_{T_1} = \frac{V_{T1}}{\mathbb{V}[Y]}, \quad S_{T_2} = \frac{V_{T2}}{\mathbb{V}[Y]}, \quad S_{T_3} = \frac{V_{T3}}{\mathbb{V}[Y]} + +.. math:: + V_{T1} = 0.5 (1 + \frac{b\pi^4}{5})^2 + \frac{8b^2\pi^8}{225}, \quad V_{T2}= \frac{a^2}{8}, \quad V_{T3} = \frac{8b^2\pi^8}{225} + +.. math:: + \mathbb{V}[Y] = \frac{a^2}{8} + \frac{b\pi^4}{5} + \frac{b^2\pi^8}{18} + \frac{1}{2} + +""" + +# %% +import numpy as np + +from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.PythonModel import PythonModel +from UQpy.distributions import Uniform +from UQpy.distributions.collection.JointIndependent import JointIndependent +from UQpy.sensitivity.sobol import Sobol + +# %% +# Create Model object +model = PythonModel( + model_script="local_ishigami.py", + model_object_name="evaluate", + var_names=[r"$X_1$", "$X_2$", "$X_3$"], + delete_files=True, + params=[7, 0.1], +) + +runmodel_obj = RunModel(model=model) + +# Define distribution object +dist_object = JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3) + +# %% +SA = Sobol(runmodel_obj, dist_object) + +computed_indices = SA.run(n_samples=100_000, num_bootstrap_samples=100) + +# %% [markdown] +# Expected first order Sobol indices: +# +# X1: 0.3139 +# +# X2: 0.4424 +# +# X3: 0.0 +# + +# %% +computed_indices["sobol_i"] + +# %% [markdown] +# Expected total order Sobol indices: +# +# X1: 0.55758886 +# +# X2: 0.44241114 +# +# X3: 0.24368366 + +# %% +computed_indices["sobol_total_i"] + +# %% [markdown] +# Confidence intervals for first order Sobol indices + +# %% +computed_indices["CI_sobol_i"] + +# %% [markdown] +# Confidence intervals for total order Sobol indices + +# %% +computed_indices["CI_sobol_total_i"] From 068fde50f05f5084cfee6a33bab741f175637464 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Sun, 8 May 2022 20:37:09 +0200 Subject: [PATCH 08/88] =?UTF-8?q?Added=20Cram=C3=A9r-von=20Mises=20sensiti?= =?UTF-8?q?vity=20index?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/UQpy/sensitivity/__init__.py | 2 + src/UQpy/sensitivity/cramer_von_mises.py | 340 +++++++++++++++++++++++ 2 files changed, 342 insertions(+) create mode 100644 src/UQpy/sensitivity/cramer_von_mises.py diff --git a/src/UQpy/sensitivity/__init__.py b/src/UQpy/sensitivity/__init__.py index 5a5b0d997..10ca1565d 100644 --- a/src/UQpy/sensitivity/__init__.py +++ b/src/UQpy/sensitivity/__init__.py @@ -1,7 +1,9 @@ from 
UQpy.sensitivity.MorrisSensitivity import MorrisSensitivity
 from UQpy.sensitivity.PceSensitivity import PceSensitivity
 from UQpy.sensitivity.sobol import Sobol
+from UQpy.sensitivity.cramer_von_mises import CramervonMises
 
 from . import MorrisSensitivity
 from . import PceSensitivity
 from . import Sobol
+from . import CramervonMises
diff --git a/src/UQpy/sensitivity/cramer_von_mises.py b/src/UQpy/sensitivity/cramer_von_mises.py
new file mode 100644
index 000000000..9cb2fdfe6
--- /dev/null
+++ b/src/UQpy/sensitivity/cramer_von_mises.py
@@ -0,0 +1,340 @@
+"""
+Computing the Cramér-von Mises sensitivity indices.
+
+References
+----------
+
+.. [1] Gamboa, F., Klein, T., & Lagnoux, A. (2018). Sensitivity Analysis
+    Based on Cramér-von Mises Distance. SIAM/ASA Journal on Uncertainty
+    Quantification, 6(2), 522-548. doi:10.1137/15M1025621
+
+.. [2] Gamboa, F., Gremaud, P., Klein, T., & Lagnoux, A. (2020). Global
+    Sensitivity Analysis: a new generation of mighty estimators based on
+    rank statistics. arXiv [math.ST]. http://arxiv.org/abs/2003.01772
+
+"""
+
+import logging
+
+import numpy as np
+
+from UQpy.sensitivity.baseclass.sensitivity import Sensitivity
+from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples
+from UQpy.sensitivity.sobol import compute_first_order as compute_first_order_sobol
+from UQpy.sensitivity.sobol import compute_total_order as compute_total_order_sobol
+from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter
+
+# TODO: Sampling strategies
+
+
+class CramervonMises(Sensitivity):
+    """
+    Compute the Cramér-von Mises indices.
+
+    Currently only available for models with scalar output.
+
+    :param runmodel_object: The computational model. It should be of type :class:`.RunModel`. \
+        The output QoI is expected to be a scalar, since the Cramér-von Mises indices \
+        are currently implemented for single-output models only.
+
+    :param dist_object: List of :class:`.Distribution` objects corresponding to each \
+        random variable, or a :class:`.JointIndependent` object \
+        (multivariate RV with independent marginals).
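+
+    :param random_state: Random seed used to initialize the pseudo-random number \
+        generator. Either :code:`None` (default), an :class:`int`, or an already \
+        initialized :class:`numpy.random.RandomState` object.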
+ + **Methods:** + """ + + def __init__( + self, runmodel_object, dist_object, random_state=None, **kwargs + ) -> None: + + super().__init__( + runmodel_object, dist_object, random_state=random_state, **kwargs + ) + + # Create logger with the same name as the class + self.logger = logging.getLogger(__name__) + self.logger.setLevel(logging.ERROR) + frmt = UQpyLoggingFormatter() + + # create console handler with a higher log level + ch = logging.StreamHandler() + ch.setFormatter(frmt) + + # add the handler to the logger + self.logger.addHandler(ch) + + self.CVM_i = None + "First order CramĆ©r-von Mises indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`" + + self.CI_CVM_i = None + "Confidence intervals of the first order CramĆ©r-von Mises indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 2)`" + + self.sobol_i = None + "First order Sobol indices computed using the pick-and-freeze samples, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`" + + self.sobol_total_i = None + "Total order Sobol indices computed using the pick-and-freeze samples, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`" + + self.n_samples = None + "Number of samples used to compute the CramĆ©r-von Mises indices, :class:`int`" + + self.num_vars = None + "Number of random variables, :class:`int`" + + def run( + self, + n_samples=1_000, + estimate_sobol_indices=False, + num_bootstrap_samples=None, + confidence_level=0.95, + disable_CVM_indices=False, + ): + + """ + Compute the CramĆ©r-von Mises indices. + + :param n_samples: Number of samples used to compute the CramĆ©r-von Mises indices. \ + If :code:`None`, the number of samples is set to the number of samples \ + in the model. + + :param estimate_sobol_indices: If :code:`True`, the Sobol indices are estimated \ + using the pick-and-freeze samples. + + :param num_bootstrap_samples: Number of bootstrap samples used to estimate the \ + Sobol indices. If :code:`None`, the number of bootstrap samples is set \ + to the number of samples in the model. + + :param confidence_level: Confidence level used to compute the confidence \ + intervals of the CramĆ©r-von Mises indices. + + :param disable_CVM_indices: If :code:`True`, the CramĆ©r-von Mises indices \ + are not computed. + + :return: A :class:`dict` with the following keys: \ + :code:`CVM_i` of shape :code:`(num_vars, 1)`, \ + :code:`CI_CVM_i` of shape :code:`(num_vars, 2)`, \ + :code:`sobol_i` of shape :code:`(num_vars, 1)`, \ + :code:`sobol_total_i` of shape :code:`(num_vars, 1)`. 
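+
+            Keys are only present when the corresponding quantity is computed: \
+            :code:`CVM_i` unless :code:`disable_CVM_indices` is :code:`True`, \
+            :code:`CI_CVM_i` only if :code:`num_bootstrap_samples` is given, and \
+            the Sobol keys only if :code:`estimate_sobol_indices` is :code:`True`.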
+ + """ + + # Check nsamples + self.n_samples = n_samples + if not isinstance(self.n_samples, int): + raise TypeError("UQpy: nsamples should be an integer") + + # Check num_bootstrap_samples data type + if num_bootstrap_samples is not None: + if not isinstance(num_bootstrap_samples, int): + raise TypeError("UQpy: num_bootstrap_samples should be an integer.\n") + elif num_bootstrap_samples is None: + self.logger.info( + "UQpy: num_bootstrap_samples is set to None, confidence intervals will not be computed.\n" + ) + + ################## GENERATE SAMPLES ################## + + A_samples, W_samples, C_i_generator, _ = generate_pick_freeze_samples( + self.dist_object, self.n_samples, self.random_state + ) + + self.logger.info("UQpy: Generated samples using the pick-freeze scheme.\n") + + ################# MODEL EVALUATIONS #################### + + A_model_evals = self._run_model(A_samples).reshape(-1, 1) + + self.logger.info("UQpy: Model evaluations A completed.\n") + + W_model_evals = self._run_model(W_samples).reshape(-1, 1) + + self.logger.info("UQpy: Model evaluations W completed.\n") + + self.num_vars = A_samples.shape[1] + + C_i_model_evals = np.zeros((self.n_samples, self.num_vars)) + + for i, C_i in enumerate(C_i_generator): + C_i_model_evals[:, i] = self._run_model(C_i).ravel() + + self.logger.info("UQpy: Model evaluations C completed.\n") + + self.logger.info("UQpy: All model evaluations computed successfully.\n") + + ######################### STORAGE ######################## + + # Create dictionary to store the sensitivity indices + computed_indices = {} + + ################## COMPUTE CVM INDICES ################## + + # flag is used to disable computation of + # CVM indices during testing + if not disable_CVM_indices: + # Compute the CramĆ©r-von Mises indices + self.CVM_i = self.pick_and_freeze_estimator( + A_model_evals, W_model_evals, C_i_model_evals + ) + + self.logger.info("UQpy: CramĆ©r-von Mises indices computed successfully.\n") + + # Store the indices in the dictionary + computed_indices["CVM_i"] = self.CVM_i + + ################# COMPUTE CONFIDENCE INTERVALS ################## + + if num_bootstrap_samples is not None: + + self.logger.info("UQpy: Computing confidence intervals ...\n") + + estimator_inputs = [ + A_model_evals, + W_model_evals, + C_i_model_evals, + ] + + self.CI_CVM_i = self.bootstrapping( + self.pick_and_freeze_estimator, + estimator_inputs, + computed_indices["CVM_i"], + num_bootstrap_samples, + confidence_level, + ) + + self.logger.info( + "UQpy: Confidence intervals for CramĆ©r-von Mises indices computed successfully.\n" + ) + + # Store the indices in the dictionary + computed_indices["CI_CVM_i"] = self.CI_CVM_i + + ################## COMPUTE SOBOL INDICES ################## + + if estimate_sobol_indices: + + self.logger.info("UQpy: Computing First order Sobol indices ...\n") + + # extract shape + _shape = C_i_model_evals.shape + + # convert C_i_model_evals to 3D array + # with n_outputs=1 in first dimension + n_outputs = 1 + C_i_model_evals = C_i_model_evals.reshape((n_outputs, *_shape)) + + self.sobol_i = compute_first_order_sobol( + A_model_evals, W_model_evals, C_i_model_evals + ) + + self.logger.info("UQpy: First order Sobol indices computed successfully.\n") + + self.sobol_total_i = compute_total_order_sobol( + A_model_evals, W_model_evals, C_i_model_evals + ) + + self.logger.info("UQpy: Total order Sobol indices computed successfully.\n") + + # Store the indices in the dictionary + computed_indices["sobol_i"] = self.sobol_i + 
computed_indices["sobol_total_i"] = self.sobol_total_i + + return computed_indices + + @staticmethod + def indicator_function(Y, W): + """ + Vectorized version of the indicator function. + + .. math:: + \mathbb{I}(Y,W) = \mathbf{1}_{Y \leq W} + + **Inputs:** + + * **Y** (`ndarray`): + Vector of values of the random variable. + Shape: `(N, 1)` + + * **W** (`ndarray`): + Vector of values of the random variable. + Shape: `(N, 1)` + + **Outputs:** + + * **indicator** (`ndarray`): + Shape: `(N, 1)` + + """ + return (Y <= W.T).astype(int) + + def pick_and_freeze_estimator(self, A_model_evals, W_model_evals, C_i_model_evals): + + """ + Compute the first order CramĆ©r-von Mises indices + using the Pick-and-Freeze estimator. + + **Inputs** + + * **A_model_evals** (`np.array`): + Shape: `(n_samples, 1)` + + * **W_model_evals** (`np.array`): + Shape: `(n_samples, 1)` + + * **C_i_model_evals** (`np.array`): + Shape: `(n_samples, num_vars)` + + **Outputs** + + * **First_order_CVM** (`np.array`): + Shape: `(num_vars)` + + """ + + ## **Notes** + + # Implementation using 2 `for` loops. This is however + # faster than the vectorized version which has only 1 `for` loop. + + # For N = 50_000 runs + # With 2 `for` loops: 26.75 seconds (this implementation) + # With 1 `for` loops: 62.42 seconds (vectorized implementation) + + # Possible improvements: + # Check indicator function run time using a profiler + # as it results in an `N` x `N` array. + # Q. Does it use a for loop under the hood? + # Computations such as `np.sum` and `np.mean` + # are handled by numpy so they are fast. + # (This should however be faster for small `N`, e.g. N=10_000) + + N = self.n_samples + m = self.num_vars + + # Model evaluations + f_A = A_model_evals.ravel() + f_W = W_model_evals.ravel() + f_C_i = C_i_model_evals + + # Store CramĆ©rvonMises indices + First_order_indices = np.zeros((m, 1)) + + # Compute CramĆ©r-von Mises indices + for i in range(m): + sum_numerator = 0 + sum_denominator = 0 + + for k in range(N): + + term_1 = self.indicator_function(f_A, f_W[k]) + term_2 = self.indicator_function(f_C_i[:, i], f_W[k]) + + mean_sum = (1 / (2 * N)) * np.sum(term_1 + term_2) + mean_product = (1 / N) * np.sum(term_1 * term_2) + + sum_numerator += mean_product - mean_sum**2 + sum_denominator += mean_sum - mean_sum**2 + + First_order_indices[i] = sum_numerator / sum_denominator + + return First_order_indices From f16288d6d537bb96cded08ae98f7a11d3ebb4021 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Sun, 8 May 2022 20:39:36 +0200 Subject: [PATCH 09/88] =?UTF-8?q?Add=20unit=20tests=20for=20Cram=C3=A9r-vo?= =?UTF-8?q?n=20Mises=20sensitivity?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tests/unit_tests/sensitivity/exponential.py | 25 ++ .../sensitivity/test_cramer_von_mises.py | 338 ++++++++++++++++++ tests/unit_tests/sensitivity/test_sobol.py | 1 - 3 files changed, 363 insertions(+), 1 deletion(-) create mode 100644 tests/unit_tests/sensitivity/exponential.py create mode 100644 tests/unit_tests/sensitivity/test_cramer_von_mises.py diff --git a/tests/unit_tests/sensitivity/exponential.py b/tests/unit_tests/sensitivity/exponential.py new file mode 100644 index 000000000..dc8c90df3 --- /dev/null +++ b/tests/unit_tests/sensitivity/exponential.py @@ -0,0 +1,25 @@ +import numpy as np + + +def evaluate(X: np.array) -> np.array: + r"""A non-linear function that is used to test Cramer-von Mises sensitivity index. + + .. 
math::
+        f(x) = \exp(x_1 + 2 x_2)
+
+    Parameters
+    ----------
+    X : np.array
+        An `N*D` array holding values for each parameter, where `N` is the
+        number of samples and `D` is the number of parameters
+        (in this case, 2).
+
+    Returns
+    -------
+    np.array
+        An array of size `N` holding the model output
+        `exp(x_1 + 2*x_2)` for each sample.
+    """
+
+    Y = np.exp(X[:, 0] + 2 * X[:, 1])
+
+    return Y
diff --git a/tests/unit_tests/sensitivity/test_cramer_von_mises.py b/tests/unit_tests/sensitivity/test_cramer_von_mises.py
new file mode 100644
index 000000000..46cebb429
--- /dev/null
+++ b/tests/unit_tests/sensitivity/test_cramer_von_mises.py
@@ -0,0 +1,338 @@
+"""
+This is the test module for the Cramér-von Mises sensitivity indices.
+
+Here, we use an exponential function to test the output.
+
+The following methods are tested:
+1. pick_and_freeze_estimator
+2. bootstrap_variance_computation
+
+Important
+----------
+The computed indices are compared using the `np.isclose` function.
+
+Function signature:
+    numpy.isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False)
+
+    Parameters:
+    a, b: array_like
+        Input arrays to compare.
+
+    rtol: float
+        The relative tolerance parameter.
+
+    atol: float
+        The absolute tolerance parameter.
+
+Each element of the `diff` array is compared as follows:
+diff = |a - b|
+diff <= atol + rtol * abs(b)
+
+- relative tolerance: rtol * abs(b)
+    It is the maximum allowed difference between a and b,
+    relative to the absolute value of b.
+    For example, to set a relative tolerance of 1%, pass rtol=0.01,
+    which ensures that a and b differ by at most 1% of |b|.
+
+- absolute tolerance: atol
+    When b is close to zero, the atol value is used.
+
+"""
+
+import numpy as np
+import pytest
+import scipy
+
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Normal, Uniform
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.cramer_von_mises import CramervonMises
+
+# Prepare
+###############################################################################
+
+# Prepare the input distribution
+@pytest.fixture()
+def exponential_input_dist_object():
+    """
+    This function returns the input distribution for the exponential function.
+
+    X1 ~ Normal(0,1)
+    X2 ~ Normal(0,1)
+
+    """
+    return JointIndependent([Normal(0, 1)] * 2)
+
+
+@pytest.fixture()
+def exponential_model_object():
+    """This function creates the exponential run_model_object"""
+    model = PythonModel(
+        model_script="exponential.py",
+        model_object_name="evaluate",
+        var_names=[r"$X_1$", "$X_2$"],
+        delete_files=True,
+    )
+
+    runmodel_obj = RunModel(model=model)
+
+    return runmodel_obj
+
+
+@pytest.fixture()
+def CVM_object(exponential_model_object, exponential_input_dist_object):
+    """This function returns the CVM object."""
+
+    return CramervonMises(exponential_model_object, exponential_input_dist_object)
+
+
+@pytest.fixture()
+def analytical_exponential_CVM_indices():
+    """This function returns the analytical Cramér-von Mises indices.
+
+    S1_CVM = (6/np.pi) * np.arctan(2) - 2
+    S2_CVM = (6/np.pi) * np.arctan(np.sqrt(19)) - 2
+
+    print(np.around(S1_CVM, 4))
+    print(np.around(S2_CVM, 4))
+
+    """
+
+    return np.array([[0.1145], [0.5693]])
+
+
+@pytest.fixture()
+def numerical_exponential_CVM_indices(CVM_object):
+    """
+    This function returns the Cramér-von Mises indices
+    computed using the Pick and Freeze algorithm.
+
+    """
+
+    SA = CVM_object
+
+    np.random.seed(12345) #!
set seed for reproducibility + + computed_indices = SA.run(n_samples=50_000) + + return computed_indices["CVM_i"] + + +@pytest.fixture() +def NUM_SAMPLES(): + """This function returns the number of samples.""" + + num_bootstrap_samples = 50 + num_samples = 10_000 + + return num_bootstrap_samples, num_samples + + +@pytest.fixture() +def bootstrap_CVM_index_variance(CVM_object, NUM_SAMPLES): + """This function returns the variance in the computed Cramer-von-Mises index + computed using the bootstrap algorithm.""" + + #### SETUP #### + SA = CVM_object + + np.random.seed(12345) #! set seed for reproducibility + + confidence_level = 0.95 + delta = -scipy.stats.norm.ppf((1 - confidence_level) / 2) + + num_bootstrap_samples, n_samples = NUM_SAMPLES + + #### Compute indices #### + computed_indices = SA.run( + n_samples=n_samples, + num_bootstrap_samples=num_bootstrap_samples, + confidence_level=confidence_level, + ) + + First_order = computed_indices["CVM_i"].ravel() + upper_bound_first_order = computed_indices["CI_CVM_i"][:, 1] + + #### Compute variance #### + std_bootstrap_first_order = (upper_bound_first_order - First_order) / delta + + return std_bootstrap_first_order**2 + + +@pytest.fixture() +def model_evals_CVM_index_variance(): + + """ + runmodel_obj = RunModel( + model_script='exponential.py', + var_names=['X1', 'X2'], + vec=True, delete_files=True) + + input_object = JointInd([Normal(0, 1)]*2) + + SA = CramervonMises(runmodel_obj, input_object) + + np.random.seed(12345) + + num_repetitions, n_samples = 1_000, 10_000 + + num_vars = 2 + + sample_first_order = np.zeros((num_vars, num_repetitions)) + + for i in range(num_repetitions): + CV_First_order = SA.run(n_samples=n_samples) + + sample_first_order[:, i] = CV_First_order.ravel() + + variance_first_order = np.var(sample_first_order, axis=1).reshape(-1, 1) + + print(variance_first_order) + + """ + + variance_first_order = np.array([4.01099066e-05, 2.06802165e-05]) + + return variance_first_order + + +@pytest.fixture() +def ishigami_input_dist_object(): + """ + This function returns the input distribution for the Ishigami function. + + X1 ~ Uniform(-pi, pi) + X2 ~ Uniform(-pi, pi) + X3 ~ Uniform(-pi, pi) + + """ + return JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3) + + +@pytest.fixture() +def ishigami_model_object(): + """This function creates the Ishigami run_model_object""" + model = PythonModel( + model_script="ishigami.py", + model_object_name="evaluate", + var_names=[r"$X_1$", "$X_2$", "$X_3$"], + delete_files=True, + params=[7, 0.1], + ) + + runmodel_obj = RunModel(model=model) + + return runmodel_obj + + +@pytest.fixture() +def CVM_object_ishigami(ishigami_model_object, ishigami_input_dist_object): + """This function returns the CVM object.""" + + return CramervonMises(ishigami_model_object, ishigami_input_dist_object) + + +@pytest.fixture() +def numerical_Sobol_indices(CVM_object_ishigami): + """ + This function returns the Sobol indices computed + using the Pick and Freeze algorithm. + """ + + SA = CVM_object_ishigami + + np.random.seed(12345) + + computed_indices = SA.run( + n_samples=500_000, estimate_sobol_indices=True, disable_CVM_indices=True + ) + + return computed_indices["sobol_i"], computed_indices["sobol_total_i"] + + +@pytest.fixture() +def analytical_ishigami_Sobol_indices(): + """ + Analytical Sobol indices for the Ishigami function. 
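+    (These are the same reference values as in test_sobol.py, for a = 7, b = 0.1.)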
+
+    Copy-paste the following to reproduce the given indices:
+
+    a = 7
+    b = 0.1
+
+    V1 = 0.5*(1 + (b*np.pi**4)/5)**2
+    V2 = (a**2)/8
+    V3 = 0
+
+    VT3 = (8*(b**2)*np.pi**8)/225
+    VT1 = V1 + VT3
+    VT2 = V2
+
+    total_variance = V2 + (b*np.pi**4)/5 + ((b**2) * np.pi**8)/18 + 0.5
+
+    S = np.array([V1, V2, V3])/total_variance
+    S_T = np.array([VT1, VT2, VT3])/total_variance
+
+    S = np.around(S, 4)
+    S_T = np.around(S_T, 4)
+
+    """
+
+    S1 = 0.3139
+    S2 = 0.4424
+    S3 = 0
+
+    S_T1 = 0.5576
+    S_T2 = 0.4424
+    S_T3 = 0.2437
+
+    S = np.array([S1, S2, S3])
+    S_T = np.array([S_T1, S_T2, S_T3])
+
+    return S.reshape(-1, 1), S_T.reshape(-1, 1)
+
+
+# Unit tests
+###############################################################################
+
+
+def test_pick_and_freeze_estimator(
+    numerical_exponential_CVM_indices, analytical_exponential_CVM_indices
+):
+    """
+    This function tests the pick_and_freeze_estimator method using 50_000 samples.
+    """
+    S_CVM_analytical = analytical_exponential_CVM_indices
+    S_CVM_numerical = numerical_exponential_CVM_indices
+
+    assert np.isclose(S_CVM_analytical, S_CVM_numerical, rtol=0, atol=1e-2).all()
+
+
+def test_bootstrap_variance_computation(
+    bootstrap_CVM_index_variance, model_evals_CVM_index_variance
+):
+    """
+    This function tests the bootstrap_variance_computation method using
+    10_000 samples and 50 bootstrap samples (see the NUM_SAMPLES fixture).
+    """
+    var_first = model_evals_CVM_index_variance
+    boot_var_first = bootstrap_CVM_index_variance
+
+    assert var_first.shape == boot_var_first.shape
+    assert np.isclose(boot_var_first, var_first, rtol=0, atol=1e-4).all()
+
+
+def test_Sobol_estimate_computation(
+    numerical_Sobol_indices, analytical_ishigami_Sobol_indices
+):
+    """
+    This function tests the Sobol_estimate_computation method using 500_000 samples.
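+    Both the first and total order Sobol estimates must match the analytical
+    Ishigami values to two decimal places (rtol=0, atol=1e-2).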
+
+    """
+    S_numerical, S_T_numerical = numerical_Sobol_indices
+    S_analytical, S_T_analytical = analytical_ishigami_Sobol_indices
+
+    assert S_analytical.shape == S_numerical.shape
+    assert S_T_analytical.shape == S_T_numerical.shape
+    assert np.isclose(S_numerical, S_analytical, rtol=0, atol=1e-2).all()
+    assert np.isclose(S_T_numerical, S_T_analytical, rtol=0, atol=1e-2).all()
diff --git a/tests/unit_tests/sensitivity/test_sobol.py b/tests/unit_tests/sensitivity/test_sobol.py
index 64882a155..784eb6143 100644
--- a/tests/unit_tests/sensitivity/test_sobol.py
+++ b/tests/unit_tests/sensitivity/test_sobol.py
@@ -47,7 +47,6 @@
 
 """
 
-import ntpath
 import numpy as np
 import pytest
 import scipy
From 27305fc4c9a342892bc0b88446cf77f970979 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 20:39:59 +0200
Subject: [PATCH 10/88] =?UTF-8?q?Added=20documentation=20Cram=C3=A9r-von?=
 =?UTF-8?q?=20Mises=20sensitivity?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../sensitivity/cramer_von_mises/README.rst   |  3 +
 docs/source/sensitivity/cramer_von_mises.rst  | 61 +++++++++++++++++++
 docs/source/sensitivity/index.rst             |  2 +
 3 files changed, 66 insertions(+)
 create mode 100644 docs/code/sensitivity/cramer_von_mises/README.rst
 create mode 100644 docs/source/sensitivity/cramer_von_mises.rst

diff --git a/docs/code/sensitivity/cramer_von_mises/README.rst b/docs/code/sensitivity/cramer_von_mises/README.rst
new file mode 100644
index 000000000..ea5f804b6
--- /dev/null
+++ b/docs/code/sensitivity/cramer_von_mises/README.rst
@@ -0,0 +1,3 @@
+Cramér-von Mises Sensitivity indices
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
diff --git a/docs/source/sensitivity/cramer_von_mises.rst b/docs/source/sensitivity/cramer_von_mises.rst
new file mode 100644
index 000000000..0c748bbb7
--- /dev/null
+++ b/docs/source/sensitivity/cramer_von_mises.rst
@@ -0,0 +1,61 @@
+Cramér-von Mises indices
+----------------------------------------
+
+A sensitivity index based on the Cramér-von Mises distance. In contrast to the variance-based Sobol indices, it takes into account the whole distribution of the model output and is therefore considered a moment-free method [5]_. Furthermore, the index can be naturally extended to multivariate model outputs (not yet implemented in UQpy).
+
+Consider a model :math:`Y=f(X): \mathbb{R}^d \rightarrow \mathbb{R}^k` with :math:`d` inputs :math:`X_{(1)}, X_{(2)}, \ldots, X_{(d)}` and :math:`k` outputs :math:`Y_{(1)}, Y_{(2)}, \ldots, Y_{(k)}`. We define the cumulative distribution function :math:`F(t)` of :math:`Y` as:
+
+.. math::
+
+    F(t)=\mathbb{P}(Y \leqslant t)=\mathbb{E}\left[\mathbb{1}_{\{Y \leqslant t\}}\right] \text { for } t=\left(t_{1}, \ldots, t_{k}\right) \in \mathbb{R}^{k}
+
+and the conditional distribution function :math:`F^{v}(t)` of :math:`Y` as:
+
+.. math::
+
+    F^{v}(t)=\mathbb{P}\left(Y \leqslant t \mid X_{v}\right)=\mathbb{E}\left[\mathbb{1}_{\{Y \leqslant t\}} \mid X_{v}\right] \text { for } t=\left(t_{1}, \ldots, t_{k}\right) \in \mathbb{R}^{k}
+
+where :math:`\{Y \leqslant t\}` means that :math:`\left\{Y_{1} \leqslant t_{1}, \ldots, Y_{k} \leqslant t_{k}\right\}`.
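+
+In estimator terms, :math:`F` is simply the expectation of an indicator function, so it
+can be approximated by an empirical CDF built from model evaluations. A minimal NumPy
+sketch (the helper is purely illustrative and not part of the UQpy API; it assumes a
+vector ``y`` of scalar model outputs and a vector ``t`` of thresholds)::
+
+    import numpy as np
+
+    def empirical_cdf(y, t):
+        """Estimate F(t) = E[1_{Y <= t}] at each threshold in t."""
+        return np.mean(y[:, None] <= t[None, :], axis=0)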
math::
+
+    S_{2, C V M}^{i}:=\frac{\int_{\mathbb{R}^{k}} \mathbb{E}\left[\left(F(t)-F^{i}(t)\right)^{2}\right] d F(t)}{\int_{\mathbb{R}^{k}} F(t)(1-F(t)) d F(t)}
+
+and the total Cramér-von Mises index :math:`S_{2, C V M}^{T o t, i}` (for input :math:`v = {i}`) is defined as:
+
+.. math::
+
+    S_{2, C V M}^{T o t, i}:=1-S_{2, C V M}^{\sim i}=1-\frac{\int_{\mathbb{R}^{k}} \mathbb{E}\left[\left(F(t)-F^{\sim i}(t)\right)^{2}\right] d F(t)}{\int_{\mathbb{R}^{k}} F(t)(1-F(t)) d F(t)}
+
+The above first and total order indices are estimated using the Pick-and-Freeze approach. This requires :math:`N(d+2)` model evaluations, where :math:`N` is the number of samples. (For implementation details, see also [6]_.)
+
+Cramér-von Mises Class
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The :class:`CramervonMises` class is imported using the following command:
+
+>>> from UQpy.sensitivity.cramer_von_mises import CramervonMises
+
+Methods
+"""""""
+.. autoclass:: UQpy.sensitivity.CramervonMises
+    :members: run
+
+Attributes
+""""""""""
+.. autoattribute:: UQpy.sensitivity.CramervonMises.CVM_i
+.. autoattribute:: UQpy.sensitivity.CramervonMises.CI_CVM_i
+.. autoattribute:: UQpy.sensitivity.CramervonMises.sobol_i
+.. autoattribute:: UQpy.sensitivity.CramervonMises.sobol_total_i
+.. autoattribute:: UQpy.sensitivity.CramervonMises.n_samples
+.. autoattribute:: UQpy.sensitivity.CramervonMises.num_vars
+
+
+Examples
+""""""""""
+
+.. toctree::
+
+   Cramér-von Mises Examples <../auto_examples/sensitivity/cramer_von_mises/index>
diff --git a/docs/source/sensitivity/index.rst b/docs/source/sensitivity/index.rst
index 0e5fef5c0..324fbf666 100644
--- a/docs/source/sensitivity/index.rst
+++ b/docs/source/sensitivity/index.rst
@@ -6,6 +6,7 @@ This module contains functionality for all the sampling methods supported in :py
 The module currently contains the following classes:

 - :py:class:`.Sobol`: Class to compute Sobol sensitivity indices.
+- :py:class:`.CramervonMises`: Class to compute Cramér-von Mises sensitivity indices.
 - :py:class:`.MorrisSensitivity`: Class to perform Morris sensitivity analysis.
 - :py:class:`.PceSensitivity`: Class to compute the sensitivity indices using the :class:`.PolynomialChaosExpansion` method.
@@ -20,3 +21,4 @@ Sensitivity analysis comprises techniques focused on determining how the variati
     Morris Sensitivity
     Polynomial Chaos Sensitivity
     Sobol Sensitivity
+    Cramér-von Mises Sensitivity

From 4d4a012828c69713b17dc78fb565a48d12738695 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 20:40:14 +0200
Subject: [PATCH 11/88] =?UTF-8?q?Added=20examples=20Cram=C3=A9r-von=20Mise?=
 =?UTF-8?q?s=20sensitivity?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../cramer_von_mises/local_exponential.py     | 20 ++++++
 .../cramer_von_mises/local_sobol_func.py      | 42 +++++++++++
 .../cramer_von_mises/plot_cvm_exponential.py  | 58 +++++++++++++++
 .../cramer_von_mises/plot_cvm_sobol_func.py   | 70 +++++++++++++++++++
 docs/source/conf.py                           |  2 +
 5 files changed, 192 insertions(+)
 create mode 100644 docs/code/sensitivity/cramer_von_mises/local_exponential.py
 create mode 100644 docs/code/sensitivity/cramer_von_mises/local_sobol_func.py
 create mode 100644 docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py
 create mode 100644 docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py

diff --git a/docs/code/sensitivity/cramer_von_mises/local_exponential.py b/docs/code/sensitivity/cramer_von_mises/local_exponential.py
new file mode 100644
index 000000000..1fd0ef0d9
--- /dev/null
+++ b/docs/code/sensitivity/cramer_von_mises/local_exponential.py
@@ -0,0 +1,20 @@
+"""
+
+Auxiliary file
+==============================================
+
+"""
+
+import numpy as np
+
+
+def evaluate(X: np.ndarray) -> np.ndarray:
+    r"""A non-linear function used to demonstrate sensitivity indices.
+
+    .. math::
+        f(x) = \exp(x_1 + 2 x_2)
+    """
+
+    Y = np.exp(X[:, 0] + 2 * X[:, 1])
+
+    return Y
diff --git a/docs/code/sensitivity/cramer_von_mises/local_sobol_func.py b/docs/code/sensitivity/cramer_von_mises/local_sobol_func.py
new file mode 100644
index 000000000..1ccabc6dd
--- /dev/null
+++ b/docs/code/sensitivity/cramer_von_mises/local_sobol_func.py
@@ -0,0 +1,42 @@
+"""
+
+Auxiliary file
+==============================================
+
+"""
+
+import numpy as np
+import copy
+
+
+def evaluate(X, a_values):
+    """Evaluate the Sobol g-function at the sample points in X."""
+
+    dims = len(a_values)
+    g = 1
+
+    for i in range(dims):
+        g_i = (np.abs(4 * X[:, i] - 2) + a_values[i]) / (1 + a_values[i])
+        g *= g_i
+
+    return g
+
+
+def sensitivities(a_values):
+    """Return the analytical first and total order Sobol indices of the g-function."""
+
+    dims = len(a_values)
+
+    Total_order = np.zeros((dims, 1))
+
+    V_i = (3 * (1 + a_values) ** 2) ** (-1)
+
+    total_variance = np.prod(1 + V_i) - 1
+
+    First_order = V_i / total_variance
+
+    for i in range(dims):
+
+        rem_First_order = copy.deepcopy(V_i)
+        rem_First_order[i] = 0
+        Total_order[i] = V_i[i] * np.prod(rem_First_order + 1) / total_variance
+
+    return First_order.reshape(-1, 1), Total_order
diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py
new file mode 100644
index 000000000..244fd7805
--- /dev/null
+++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py
@@ -0,0 +1,58 @@
+r"""
+
+Exponential function
+==============================================
+
+.. 
math::
+    f(x) := \exp(x_1 + 2x_2), \quad x_1, x_2 \sim \mathcal{N}(0, 1)
+
+"""

+# %%
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Normal
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.cramer_von_mises import CramervonMises as cvm

+# %%
+# Create Model object
+model = PythonModel(
+    model_script="local_exponential.py",
+    model_object_name="evaluate",
+    var_names=[r"$X_1$", r"$X_2$"],
+    delete_files=True,
+)
+
+runmodel_obj = RunModel(model=model)
+
+# Define distribution object
+dist_object = JointIndependent([Normal(0, 1)] * 2)
+
+# %% [markdown]
+# Compute Cramer-von Mises indices
+
+# %%
+# Create the CvM object
+SA = cvm(runmodel_obj, dist_object)
+
+# Compute Cramer-von Mises indices using the pick-and-freeze algorithm
+# (first order Sobol indices are estimated from the same model evaluations)
+computed_indices = SA.run(n_samples=20_000, estimate_sobol_indices=True)
+
+# %% [markdown]
+# Cramer-von Mises sensitivity analysis
+#
+# Expected value of the sensitivity indices:
+#
+# $S^1_{CVM} = \frac{6}{\pi} \operatorname{arctan}(2) - 2 \approx 0.1145$
+#
+# $S^2_{CVM} = \frac{6}{\pi} \operatorname{arctan}(\sqrt{19}) - 2 \approx 0.5693$
+
+# %%
+computed_indices["CVM_i"]
+
+# %%
+computed_indices["sobol_i"]
+
+# %%
+computed_indices["sobol_total_i"]
diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py
new file mode 100644
index 000000000..da17e3e2f
--- /dev/null
+++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py
@@ -0,0 +1,70 @@
+r"""
+
+Sobol function
+==============================================
+
+.. math::
+
+    g(x_1, x_2, \ldots, x_D) := \prod_{i=1}^{D} \frac{|4x_i - 2| + a_i}{1 + a_i},
+
+where,
+
+.. math::
+    x_i \sim \mathcal{U}(0, 1), \quad a_i \in \mathbb{R}.
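+
+The first order Sobol indices of the g-function are known in closed form; the
+``sensitivities`` helper in ``local_sobol_func.py`` evaluates them as
+
+.. math::
+    V_i = \frac{1}{3(1 + a_i)^2}, \qquad
+    S_i = \frac{V_i}{\prod_{j=1}^{D}(1 + V_j) - 1}.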
+
+"""

+# %%
+import numpy as np
+
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Uniform
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.cramer_von_mises import CramervonMises as cvm

+# %%
+# Create Model object
+num_vars = 6
+a_vals = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
+
+model = PythonModel(
+    model_script="local_sobol_func.py",
+    model_object_name="evaluate",
+    var_names=[r"$X_1$", r"$X_2$", r"$X_3$", r"$X_4$", r"$X_5$", r"$X_6$"],
+    delete_files=True,
+    a_values=a_vals,
+)
+
+runmodel_obj = RunModel(model=model)
+
+# Define distribution object
+dist_object = JointIndependent([Uniform(0, 1)] * num_vars)
+
+# %%
+SA = cvm(runmodel_obj, dist_object)
+
+# Compute Cramer-von Mises indices using the pick-and-freeze algorithm
+computed_indices = SA.run(n_samples=20_000, estimate_sobol_indices=True)
+
+# %%
+computed_indices["CVM_i"]
+
+# %% [markdown]
+# First order Sobol indices computed analytically
+#
+# $S_1$ = 0.46067666
+#
+# $S_2$ = 0.20474518
+#
+# $S_3$ = 0.11516917
+#
+# $S_4$ = 0.07370827
+#
+# $S_5$ = 0.0511863
+#
+# $S_6$ = 0.03760626
+#
+
+# %%
+computed_indices["sobol_i"]
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 318e66625..9c31fa120 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -92,6 +92,7 @@
     "../code/transformations/nataf",
     "../code/sensitivity/morris",
     "../code/sensitivity/sobol",
+    "../code/sensitivity/cramer_von_mises",
     "../code/stochastic_processes/bispectral",
     "../code/stochastic_processes/karhunen_loeve",
     "../code/stochastic_processes/spectral",
@@ -127,6 +128,7 @@
     "auto_examples/transformations/nataf",
     "auto_examples/sensitivity/morris",
     "auto_examples/sensitivity/sobol",
+    "auto_examples/sensitivity/cramer_von_mises",
     "auto_examples/stochastic_processes/bispectral",
     "auto_examples/stochastic_processes/karhunen_loeve",
     "auto_examples/stochastic_processes/spectral",

From 8ab489215f852ec46de90164ad92bf4281dd39a5 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 21:16:56 +0200
Subject: [PATCH 12/88] Fixed references in documentation

---
 docs/source/sensitivity/cramer_von_mises.rst |  8 ++++++--
 docs/source/sensitivity/sobol.rst            | 15 ++++++++++-----
 2 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/docs/source/sensitivity/cramer_von_mises.rst b/docs/source/sensitivity/cramer_von_mises.rst
index 0c748bbb7..ccd412af9 100644
--- a/docs/source/sensitivity/cramer_von_mises.rst
+++ b/docs/source/sensitivity/cramer_von_mises.rst
@@ -1,7 +1,7 @@
 Cramér-von Mises indices
 ----------------------------------------

-A sensitivity index based on the Cramér-von Mises distance. In contrast to the variance-based Sobol indices, it takes into account the whole distribution of the model output and is therefore considered a moment-free method [5]_. Furthermore, the index extends naturally to multivariate model outputs (not yet implemented in UQpy).
+A sensitivity index based on the Cramér-von Mises distance. In contrast to the variance-based Sobol indices, it takes into account the whole distribution of the model output and is therefore considered a moment-free method [1]_. Furthermore, the index extends naturally to multivariate model outputs (not yet implemented in UQpy).

 Consider a model :math:`Y=f(X): \mathbb{R}^d \rightarrow \mathbb{R}^k` with :math:`d` inputs :math:`X_{(1)}, X_{(2)}, \ldots, X_{(d)}` and :math:`k` outputs :math:`Y_{(1)}, Y_{(2)}, \ldots, Y_{(k)}`. 
We define the cumulative distribution function :math:`F(t)` of :math:`Y` as:
@@ -29,7 +29,11 @@ and the total Cramér-von Mises index :math:`S_{2, C V M}^{T o t, i}` (for input

     S_{2, C V M}^{T o t, i}:=1-S_{2, C V M}^{\sim i}=1-\frac{\int_{\mathbb{R}^{k}} \mathbb{E}\left[\left(F(t)-F^{\sim i}(t)\right)^{2}\right] d F(t)}{\int_{\mathbb{R}^{k}} F(t)(1-F(t)) d F(t)}

-The above first and total order indices are estimated using the Pick-and-Freeze approach. This requires :math:`N(d+2)` model evaluations, where :math:`N` is the number of samples. (For implementation details, see also [6]_.)
+The above first and total order indices are estimated using the Pick-and-Freeze approach. This requires :math:`N(d+2)` model evaluations, where :math:`N` is the number of samples. (For implementation details, see also [2]_.)
+
+.. [1] Gamboa, F., Klein, T., & Lagnoux, A. (2018). Sensitivity Analysis Based on Cramér-von Mises Distance. SIAM/ASA Journal on Uncertainty Quantification, 6(2), 522-548. doi:10.1137/15M1025621. (`Link `_)
+
+.. [2] Gamboa, F., Gremaud, P., Klein, T., & Lagnoux, A. (2020). Global Sensitivity Analysis: a new generation of mighty estimators based on rank statistics. arXiv [math.ST]. (`Link `_)

 Cramér-von Mises Class
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/docs/source/sensitivity/sobol.rst b/docs/source/sensitivity/sobol.rst
index 1966330c1..462db15fd 100644
--- a/docs/source/sensitivity/sobol.rst
+++ b/docs/source/sensitivity/sobol.rst
@@ -31,7 +31,7 @@ The Sobol indices are computed using the Pick-and-Freeze approach for single out
 Compared to "Sobol1993", the "Janon2014" estimator makes more efficient use of model evaluations and produces smaller confidence intervals.

-- Sobol1993: Requires :math:`N(m + 1)` model evaluations [2]_.
+- Sobol1993: Requires :math:`N(m + 1)` model evaluations [1]_.

 .. math::
     S_{i} = \frac{\mathbb{V}\left[E\left(Y \mid X_{i}\right)\right]}{\mathbb{V}(Y)} = \frac{ (1/N) Y_{A} \cdot Y_{C_{i}}-f_{0}^{2}}{(1 / N) Y_{A} \cdot Y_{A}-f_{0}^{2}}

 .. math::
     y_{A}=f(A), \quad y_{C_{i}}=f(C_{i}), \quad f_{0}^{2}=\left(\frac{1}{N} \sum_{j=1}^{N} y_{A}^{(j)} \right)^{2}

-- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [4]_.
+- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [2]_.

 2. **Second order indices** (:math:`S_{ij}`)

-- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [4]_.
+- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [2]_.

 3. **Total order indices** (:math:`S_{T_{i}}`)

-- Homma1996: Requires :math:`N(m + 1)` model evaluations [2]_.
+- Homma1996: Requires :math:`N(m + 1)` model evaluations [1]_.

 .. math::
     S_{T_{i}} = 1 - \frac{\mathbb{V}\left[E\left(Y \mid \mathbf{X}_{\sim_{i}}\right)\right]}{\mathbb{V}(Y)} = 1 - \frac{ (1 / N) Y_{B} \cdot Y_{C_{i}}-f_{0}^{2}}{(1 / N) Y_{A} \cdot Y_{A}-f_{0}^{2}}

 .. math::
     y_{A}=f(A), \quad y_{B}=f(B), \quad y_{C_{i}}=f(C_{i}), \quad f_{0}^{2}=\left(\frac{1}{2N} \sum_{j=1}^{N} \left( y_{A}^{(j)} + y_{B}^{(j)} \right) \right)^{2}

-- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [4]_.
+- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [2]_.

+.. [1] Saltelli, A. (2008). Global sensitivity analysis: the primer.
+   John Wiley. ISBN: 9780470059975
+
+.. [2] Saltelli, A. (2002). Making best use of model evaluations to compute sensitivity indices. 
(`Link `_)
+
 Sobol Class
 ^^^^^^^^^^^^^^^^^^

From 852be9d3f41a0815be3d831088c21579ca76fb04 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 21:17:22 +0200
Subject: [PATCH 13/88] Rearranged order in index

---
 docs/source/sensitivity/index.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/source/sensitivity/index.rst b/docs/source/sensitivity/index.rst
index 324fbf666..4bb68e1a9 100644
--- a/docs/source/sensitivity/index.rst
+++ b/docs/source/sensitivity/index.rst
@@ -5,10 +5,10 @@ This module contains functionality for all the sampling methods supported in :py

 The module currently contains the following classes:

-- :py:class:`.Sobol`: Class to compute Sobol sensitivity indices.
 - :py:class:`.CramervonMises`: Class to compute Cramér-von Mises sensitivity indices.
 - :py:class:`.MorrisSensitivity`: Class to perform Morris sensitivity analysis.
 - :py:class:`.PceSensitivity`: Class to compute the sensitivity indices using the :class:`.PolynomialChaosExpansion` method.
+- :py:class:`.Sobol`: Class to compute Sobol sensitivity indices.

 Sensitivity analysis comprises techniques focused on determining how the variations of input variables :math:`X=\left[ X_{1}, X_{2}, \ldots, X_{d} \right]` of a mathematical model influence the response value :math:`Y=h(X)`.

@@ -18,7 +18,7 @@ Sensitivity analysis comprises techniques focused on determining how the variati
     :hidden:
     :caption: Sensitivity

+    Cramér-von Mises Sensitivity
     Morris Sensitivity
     Polynomial Chaos Sensitivity
     Sobol Sensitivity
-    Cramér-von Mises Sensitivity

From 69ac38aec2546bd9b6afb8b634eb9d5ff4fd6115 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 8 May 2022 21:18:11 +0200
Subject: [PATCH 14/88] Minor fixes in docstrings

---
 src/UQpy/sensitivity/cramer_von_mises.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/src/UQpy/sensitivity/cramer_von_mises.py b/src/UQpy/sensitivity/cramer_von_mises.py
index 9cb2fdfe6..229a8b761 100644
--- a/src/UQpy/sensitivity/cramer_von_mises.py
+++ b/src/UQpy/sensitivity/cramer_von_mises.py
@@ -41,6 +41,9 @@ class CramervonMises(Sensitivity):
         random variable, or :class:`.JointIndependent` object \
         (multivariate RV with independent marginals).

+    :param random_state: Random seed used to initialize the pseudo-random number \
+        generator. Default is :any:`None`.
+
     **Methods:**
     """

@@ -95,15 +98,13 @@ def run(
         Compute the Cramér-von Mises indices.

         :param n_samples: Number of samples used to compute the Cramér-von Mises indices. \
-            If :code:`None`, the number of samples is set to the number of samples \
-            in the model.
+            Default is 1,000.

         :param estimate_sobol_indices: If :code:`True`, the Sobol indices are estimated \
             using the pick-and-freeze samples.

         :param num_bootstrap_samples: Number of bootstrap samples used to estimate the \
-            Sobol indices. If :code:`None`, the number of bootstrap samples is set \
-            to the number of samples in the model.
+            confidence intervals. Default is :any:`None`.

         :param confidence_level: Confidence level used to compute the confidence \
             intervals of the Cramér-von Mises indices.
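+
+        A minimal usage sketch (``runmodel_obj`` below stands for the user's
+        :class:`.RunModel` object; the key names follow the examples in
+        docs/code/sensitivity/cramer_von_mises):
+
+        >>> SA = CramervonMises(runmodel_obj, dist_object)
+        >>> computed_indices = SA.run(n_samples=50_000, estimate_sobol_indices=True)
+        >>> computed_indices["CVM_i"]    # first order Cramér-von Mises indices
+        >>> computed_indices["sobol_i"]  # first order Sobol estimates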
From 3f59fd89706be8e48dfdae1d1efef719aaa7f2d4 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Sun, 8 May 2022 21:18:40 +0200 Subject: [PATCH 15/88] Changed docstrings to match rtd_theme --- src/UQpy/sensitivity/sobol.py | 170 ++++++++++++---------------------- 1 file changed, 57 insertions(+), 113 deletions(-) diff --git a/src/UQpy/sensitivity/sobol.py b/src/UQpy/sensitivity/sobol.py index d4fb1de56..0eb327aaf 100644 --- a/src/UQpy/sensitivity/sobol.py +++ b/src/UQpy/sensitivity/sobol.py @@ -69,65 +69,16 @@ class Sobol(Sensitivity): For time-series models, the sensitivity indices are computed for each time instant separately. (Pointwise-in-time Sobol indices) - **Inputs:** - - * **runmodel_object** (``RunModel`` object): - The computational model. It should be of type - ``RunModel`` (see ``RunModel`` class). - The output QoI can be a scalar or vector of - length `ny`, then the sensitivity indices of - all `ny` outputs are computed independently. + :param runmodel_object: The computational model. It should be of type :class:`.RunModel`. \ + The output QoI can be a scalar or vector of length :code:`ny`, then the sensitivity \ + indices of all :code:`ny` outputs are computed independently. - * **dist_object** ((list of) ``Distribution`` object(s)): - List of ``Distribution`` objects corresponding - to each random variable, or ``JointInd`` object + :param distributions: List of :class:`.Distribution` objects corresponding to each \ + random variable, or :class:`.JointIndependent` object \ (multivariate RV with independent marginals). - * **random_state** (None or `int` or ``numpy.random.RandomState`` object): - Random seed used to initialize the - pseudo-random number generator. - Default is None. - - **Attributes:** - - * **sobol_i** (`ndarray`): - First order sensitivity indices. - Shape: `(num_vars, n_outputs)` - - * **sobol_total_i** (`ndarray`): - Total order sensitivity indices. - Shape: `(num_vars, n_outputs)` - - * **sobol_ij** (`ndarray`): - Second order sensitivity indices. - Shape: `(num_second_order_terms, n_outputs)` - - * **CI_sobol_i** (`ndarray`): - Confidence intervals for the first order sensitivity indices. - Shape: `(num_vars, 2)` - - if multioutput: Shape: `(n_outputs, num_vars, 2)` - - * **CI_sobol_total_i** (`ndarray`): - Confidence intervals for the total order sensitivity indices. - Shape: `(num_vars, 2)` - - if multioutput: Shape: `(n_outputs, num_vars, 2)` - - * **CI_sobol_ij** (`ndarray`): - Confidence intervals for the second order Sobol indices. - Shape: `(num_second_order_terms, 2)` - - if multioutput: Shape: `(n_outputs, num_second_order_terms, 2)` - - * **n_samples** (`int`): - Number of samples used to compute the sensitivity indices. - - * **num_vars** (`int`): - Number of model input variables. - - * **multioutput** (`bool`): - True if the model has multiple outputs. + :param random_state: Random seed used to initialize the pseudo-random number \ + generator. Default is :any:`None`. 
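+
+    A minimal usage sketch (``runmodel_object`` and ``dist_object`` stand for the \
+    user's model and input distributions; the returned keys follow :meth:`run` below):
+
+    >>> SA = Sobol(runmodel_object, dist_object)
+    >>> computed_indices = SA.run(n_samples=50_000)
+    >>> computed_indices["sobol_i"]        # first order Sobol indices
+    >>> computed_indices["sobol_total_i"]  # total order Sobol indices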
**Methods:** """ @@ -150,6 +101,33 @@ def __init__( # add the handler to the logger self.logger.addHandler(ch) + self.sobol_i = None + "First order Sobol indices, :class:`numpy.ndarray` of shape `(num_vars, n_outputs)`" + + self.sobol_total_i = None + "Total order Sobol indices, :class:`numpy.ndarray` of shape `(num_vars, n_outputs)`" + + self.sobol_ij = None + "Second order Sobol indices, :class:`numpy.ndarray` of shape `(num_second_order_terms, n_outputs)`" + + self.CI_sobol_i = None + "Confidence intervals for the first order Sobol indices, :class:`numpy.ndarray` of shape `(num_vars, 2)`" + + self.CI_sobol_total_i = None + "Confidence intervals for the total order Sobol indices, :class:`numpy.ndarray` of shape `(num_vars, 2)`" + + self.CI_sobol_ij = None + "Confidence intervals for the second order Sobol indices, :class:`numpy.ndarray` of shape `(num_second_order_terms, 2)`" + + self.n_samples = None + "Number of samples used to compute the sensitivity indices, :class:`int`" + + self.num_vars = None + "Number of model input variables, :class:`int`" + + self.multioutput = None + "True if the model has multiple outputs, :class:`bool`" + def run( self, n_samples=1_000, @@ -164,71 +142,37 @@ def run( """ Compute the sensitivity indices and confidence intervals. - **Inputs:** - - * **n_samples** (`int`): - Number of samples used to compute the sensitivity indices. + :param n_samples: Number of samples used to compute the sensitivity indices. \ Default is 1,000. - * **num_boostrap_samples** (`int`): - Number of bootstrap samples used to compute - the confidence intervals. - Default is None. - - * **confidence_interval** (`float`): - Confidence interval used to compute the confidence intervals. - Default is 0.95. - - * **estimate_second_order** (`bool`): - If True, compute the second order sensitivity indices. - Default is False. - - * **first_order_scheme** (`str`): - Scheme used to compute the first order Sobol indices. - Default is "Sobol1993". - - * **total_order_scheme** (`str`): - Scheme used to compute the total order Sobol indices. - Default is "Homma1996". - - * **second_order_scheme** (`str`): - Scheme used to compute the second order Sobol indices. - Default is "Saltelli2002". - - **Outputs:** - - * **computed_indices** (`dict`): - Dictionary containing the computed sensitivity indices. - - * **sobol_i** (`ndarray`): - First order Sobol indices. - Shape: `(num_vars, n_outputs)` - - * **sobol_total_i** (`ndarray`): - Total order Sobol indices. - Shape: `(num_vars, n_outputs)` - - * **sobol_ij** (`ndarray`): - Second order Sobol indices. - Shape: `(num_second_order_terms, n_outputs)` + :param num_bootstrap_samples: Number of bootstrap samples used to compute the \ + confidence intervals. Default is :any:`None`. - * **CI_sobol_i** (`ndarray`): - Confidence intervals for the first order Sobol indices. - Shape: `(num_vars, 2)` + :param confidence_interval: Confidence level used to compute the confidence \ + intervals. Default is 0.95. - if multioutput: Shape: `(n_outputs, num_vars, 2)` + :param estimate_second_order: If True, the second order Sobol indices are \ + estimated. Default is False. - * **CI_sobol_total_i** (`ndarray`): - Confidence intervals for the total order Sobol indices. - Shape: `(num_vars, 2)` + :param first_order_scheme: Scheme used to compute the first order Sobol \ + indices. Default is "Janon2014". - if multioutput: Shape: `(n_outputs, num_vars, 2)` + :param total_order_scheme: Scheme used to compute the total order Sobol \ + indices. Default is "Homma1996". 
-        * **CI_sobol_ij** (`ndarray`):
-            Confidence intervals for the second order Sobol indices.
-            Shape: `(num_second_order_terms, 2)`
+        :param second_order_scheme: Scheme used to compute the second order \
+            Sobol indices. Default is "Saltelli2002".

-            if multioutput: Shape: `(n_outputs, num_second_order_terms, 2)`
+        :return: A :class:`dict` with the following keys: \
+            :code:`sobol_i` of shape :code:`(num_vars, 1)`, \
+            :code:`sobol_total_i` of shape :code:`(num_vars, 1)`, \
+            :code:`sobol_ij` of shape :code:`(num_second_order_terms, 1)`, \
+            :code:`CI_sobol_i` of shape :code:`(num_vars, 2)` \
+            (if multioutput: :code:`(n_outputs, num_vars, 2)`), \
+            :code:`CI_sobol_total_i` of shape :code:`(num_vars, 2)` \
+            (if multioutput: :code:`(n_outputs, num_vars, 2)`), \
+            :code:`CI_sobol_ij` of shape :code:`(num_second_order_terms, 2)` \
+            (if multioutput: :code:`(n_outputs, num_second_order_terms, 2)`).

         """

         # Check n_samples data type

From bc4648e0217ee00e27b7c82c9eaef86bb54b37da Mon Sep 17 00:00:00 2001
From: Dimitris Tsapetis
Date: Sun, 8 May 2022 15:43:36 -0400
Subject: [PATCH 16/88] Removes import warnings

---
 src/UQpy/sampling/mcmc/DRAM.py                                | 4 +---
 src/UQpy/sampling/mcmc/baseclass/MCMC.py                      | 2 ++
 .../surrogates/gaussian_process/GaussianProcessRegression.py  | 2 +-
 src/UQpy/utilities/MinimizeOptimizer.py                       | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/UQpy/sampling/mcmc/DRAM.py b/src/UQpy/sampling/mcmc/DRAM.py
index 3bfe64671..79e18e35a 100644
--- a/src/UQpy/sampling/mcmc/DRAM.py
+++ b/src/UQpy/sampling/mcmc/DRAM.py
@@ -1,10 +1,8 @@
 import logging
 from typing import Callable
 import warnings
-
-import numpy as np
-
 warnings.filterwarnings('ignore')
+import numpy as np
 from beartype import beartype

 from UQpy.sampling.mcmc.baseclass.MCMC import MCMC
diff --git a/src/UQpy/sampling/mcmc/baseclass/MCMC.py b/src/UQpy/sampling/mcmc/baseclass/MCMC.py
index 9481be501..a863c9d7b 100644
--- a/src/UQpy/sampling/mcmc/baseclass/MCMC.py
+++ b/src/UQpy/sampling/mcmc/baseclass/MCMC.py
@@ -1,5 +1,7 @@
 import logging
 from typing import Callable, Tuple, List
+import warnings
+warnings.filterwarnings('ignore')

 import numpy as np
 from beartype import beartype
diff --git a/src/UQpy/surrogates/gaussian_process/GaussianProcessRegression.py b/src/UQpy/surrogates/gaussian_process/GaussianProcessRegression.py
index 4e4f2973b..1d4741e58 100755
--- a/src/UQpy/surrogates/gaussian_process/GaussianProcessRegression.py
+++ b/src/UQpy/surrogates/gaussian_process/GaussianProcessRegression.py
@@ -38,7 +38,7 @@ def __init__(
         input attribute defines the dimension of input training point, thus its length/shape should be equal to the
         input dimension plus one (d+1), this list/array includes 'd' length scale and process standard deviation. In
         case of noisy observations/output, its length/shape should be equal to the input dimension plus two (d+2), this
-        list/array includes 'd' lengthscales, process standar deviation and noise variance.
+        list/array includes 'd' lengthscales, process standard deviation and noise variance.

         :param regression_model: A class object, which computes the basis function at a sample point. If
             regression_model is None, this class will train GP with regression. 
Default: None diff --git a/src/UQpy/utilities/MinimizeOptimizer.py b/src/UQpy/utilities/MinimizeOptimizer.py index 7bde05daa..aa76a99ec 100644 --- a/src/UQpy/utilities/MinimizeOptimizer.py +++ b/src/UQpy/utilities/MinimizeOptimizer.py @@ -4,7 +4,7 @@ class MinimizeOptimizer: - def __init__(self, method: str = 'bfgs', bounds=None): + def __init__(self, method: str = 'l-bfgs-b', bounds=None): # super().__init__(bounds) self._bounds = None self.logger = logging.getLogger(__name__) From eb6437812e371b8109c539ff6556c3acfcda8f67 Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Sun, 8 May 2022 16:44:23 -0400 Subject: [PATCH 17/88] Example updates --- docs/code/RunModel/abaqus_example.py | 16 ++-- .../code/RunModel/ls_dyna_example_multijob.py | 16 +--- .../RunModel/ls_dyna_example_singlejob.py | 14 +-- docs/code/RunModel/matlab_example.py | 92 +++++++++---------- docs/code/RunModel/opensees_example.py | 11 ++- docs/code/RunModel/python_example.py | 32 ++++--- docs/source/runmodel_doc.rst | 88 +++++++++--------- .../model_execution/ThirdPartyModel.py | 4 +- 8 files changed, 131 insertions(+), 142 deletions(-) diff --git a/docs/code/RunModel/abaqus_example.py b/docs/code/RunModel/abaqus_example.py index 211c1e4be..0cb1cd02d 100644 --- a/docs/code/RunModel/abaqus_example.py +++ b/docs/code/RunModel/abaqus_example.py @@ -4,18 +4,18 @@ ================================== """ - # %% md # # Import the necessary libraries. # %% - +import glob import pickle import time - +import os from UQpy.distributions import Normal, Uniform -from UQpy.run_model.RunModel import * +from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.ThirdPartyModel import ThirdPartyModel from UQpy.sampling import MonteCarloSampling calling_directory = os.getcwd() @@ -36,12 +36,12 @@ # %% -abaqus_sfe_model = RunModel(model_script='abaqus_fire_analysis.py', input_template='abaqus_input.py', - output_script='extract_abaqus_output.py', var_names=var_names, ntasks=24, - model_dir='SFE_MCS', verbose=True, cores_per_task=1) +m = ThirdPartyModel(model_script='abaqus_fire_analysis.py', input_template='abaqus_input.py', + output_script='extract_abaqus_output.py', var_names=var_names, + model_dir='SFE_MCS', ) +abaqus_sfe_model = RunModel(cores_per_task=1, ntasks=24, model=m) print('Example: Created the model object.') - # %% md # # Towards defining the sampling scheme diff --git a/docs/code/RunModel/ls_dyna_example_multijob.py b/docs/code/RunModel/ls_dyna_example_multijob.py index 7294caa56..425fcccdc 100644 --- a/docs/code/RunModel/ls_dyna_example_multijob.py +++ b/docs/code/RunModel/ls_dyna_example_multijob.py @@ -4,7 +4,6 @@ ================================== """ - # %% md # # Import the necessary libraries. @@ -12,6 +11,7 @@ # %% from UQpy.distributions import Uniform from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.ThirdPartyModel import ThirdPartyModel from UQpy.sampling import MonteCarloSampling # %% md @@ -42,13 +42,7 @@ # Run the model. 
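#
# (With ``ntasks=6`` and ``cores_per_task=12`` in the call below, six LS-DYNA
# jobs run concurrently and the run expects a 72-core allocation in total;
# this sizing is chosen for the example, not a UQpy requirement.)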
# %% - -run_ = RunModel(samples=x.samples, ntasks=6, model_script='dyna_script.py', input_template='dyna_input.k', - var_names=['x0', 'y0', 'z0', 'R0', 'x1', 'y1', 'z1', 'R1'], model_dir='dyna_test', cluster=True, - verbose=False, fmt='{:>10.4f}', cores_per_task=12) - - - - - - +m = ThirdPartyModel(model_script='dyna_script.py', input_template='dyna_input.k', + var_names=['x0', 'y0', 'z0', 'R0', 'x1', 'y1', 'z1', 'R1'], model_dir='dyna_test', + fmt='{:>10.4f}') +run_ = RunModel(samples=x.samples, ntasks=6, cores_per_task=12, model=m) diff --git a/docs/code/RunModel/ls_dyna_example_singlejob.py b/docs/code/RunModel/ls_dyna_example_singlejob.py index 486b2fd00..9b1925425 100644 --- a/docs/code/RunModel/ls_dyna_example_singlejob.py +++ b/docs/code/RunModel/ls_dyna_example_singlejob.py @@ -4,7 +4,6 @@ ================================== """ - # %% md # # Import the necessary libraries. @@ -12,6 +11,7 @@ # %% from UQpy.distributions import Uniform from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.ThirdPartyModel import ThirdPartyModel from UQpy.sampling import MonteCarloSampling # %% md @@ -40,12 +40,6 @@ # Run the model. # %% -run_ = RunModel(samples=x.samples, ntasks=1, model_script='dyna_script.py', input_template='dyna_input.k', - var_names=['x0', 'y0', 'z0', 'R0', 'x1', 'y1', 'z1', 'R1'], model_dir='dyna_test', cluster=True, - verbose=False, fmt='{:>10.4f}', cores_per_task=48) - - - - - - +m = ThirdPartyModel(model_script='dyna_script.py', input_template='dyna_input.k', + var_names=['x0', 'y0', 'z0', 'R0', 'x1', 'y1', 'z1', 'R1'], model_dir='dyna_test', fmt='{:>10.4f}') +run_ = RunModel(samples=x.samples, ntasks=1, cores_per_task=48, model=m) diff --git a/docs/code/RunModel/matlab_example.py b/docs/code/RunModel/matlab_example.py index a85fa287f..fa2fbf33c 100644 --- a/docs/code/RunModel/matlab_example.py +++ b/docs/code/RunModel/matlab_example.py @@ -4,7 +4,6 @@ ================================== """ - # %% md # # The RunModel class is capable of passing input in different formats into a single computational model. This means that @@ -64,6 +63,7 @@ from UQpy.sampling import MonteCarloSampling from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.ThirdPartyModel import ThirdPartyModel from UQpy.distributions import Normal import time import numpy as np @@ -133,10 +133,11 @@ if pick_model == 'scalar' or pick_model == 'all': # Call to RunModel - Here we run the model while instantiating the RunModel object. 
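+    # (In the updated API, ThirdPartyModel collects the file-handling settings,
+    # i.e. the input template, driver script and output post-processing, while
+    # RunModel itself only controls execution resources such as ntasks.)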
t = time.time()
-    m = RunModel(ntasks=1, model_script='matlab_model_sum_scalar.py',
-                 input_template='sum_scalar.m', var_names=names, model_object_name="matlab",
-                 output_script='process_matlab_output.py', output_object_name='read_output',
-                 resume=False, model_dir='Matlab_Model', fmt="{:>10.4f}", verbose=True)
+    model = ThirdPartyModel(model_script='matlab_model_sum_scalar.py',
+                            input_template='sum_scalar.m', var_names=names, model_object_name="matlab",
+                            output_script='process_matlab_output.py', output_object_name='read_output',
+                            model_dir='Matlab_Model', fmt="{:>10.4f}")
+    m = RunModel(ntasks=1, model=model)
     m.run(x_mcs.samples)
     t_ser_matlab = time.time() - t
     print("\nTime for serial execution:")
@@ -145,7 +146,6 @@
     print("The values returned from the Matlab simulation:")
     print(m.qoi_list)

-
 # %% md
 #
 # 1.2 Samples passed as list, no format specification, parallel execution
@@ -161,10 +161,11 @@
 if pick_model == 'scalar' or pick_model == 'all':
     # Call to RunModel with samples as a list - Again we run the model while instantiating the RunModel object.
     t = time.time()
-    m = RunModel(samples=x_mcs_list, ntasks=2, model_script='matlab_model_sum_scalar.py',
-                 input_template='sum_scalar.m', var_names=names, model_object_name="matlab",
-                 output_script='process_matlab_output.py', output_object_name='read_output', resume=False,
-                 model_dir='Matlab_Model', verbose=True)
+    model = ThirdPartyModel(model_script='matlab_model_sum_scalar.py',
+                            input_template='sum_scalar.m', var_names=names, model_object_name="matlab",
+                            output_script='process_matlab_output.py', output_object_name='read_output',
+                            model_dir='Matlab_Model')
+    m = RunModel(samples=x_mcs_list, ntasks=2, model=model)
     t_par_matlab = time.time() - t
     print("\nTime for parallel execution:")
     print(t_par_matlab)
@@ -172,7 +173,6 @@
     print("The values returned from the Matlab simulation:")
     print(m.qoi_list)

-
 # %% md
 #
 # Example 2: Single tri-variate random variable
@@ -228,13 +228,12 @@
 if pick_model == 'vector' or pick_model == 'all':
     # Call to RunModel - Here we run the model while instantiating the RunModel object.
-    # Notice that we do not specify var_names. This will default to a single variable with name x0. In this case,
-    # we will read them in by indexing in the input_template.
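+    # (var_names=['x0'] below labels the single tri-variate input; its components
+    # are read by indexing into x0 in the input_template.)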
t = time.time() - m = RunModel(samples=x_mcs_tri_list, ntasks=2, model_script='matlab_model_sum_vector_indexed.py', - input_template='sum_vector_indexed.m', model_object_name="matlab", - output_script='process_matlab_output.py', output_object_name='read_output', - resume=False, model_dir='Matlab_Model') + model = ThirdPartyModel(model_script='matlab_model_sum_vector_indexed.py', + input_template='sum_vector_indexed.m', model_object_name="matlab", + output_script='process_matlab_output.py', output_object_name='read_output', + model_dir='Matlab_Model', var_names=['x0']) + m = RunModel(samples=x_mcs_tri_list, ntasks=2, model=model) t_ser_matlab = time.time() - t print("\nTime for parallel execution:") print(t_ser_matlab) @@ -285,10 +283,11 @@ # Notice that we do not specify var_names. This will default to a single variable with name x0. In this case, # we will read them in by indexing in the input_template. t = time.time() - m = RunModel(samples=x_mcs_tri, ntasks=1, model_script='matlab_model_sum_vector.py', - input_template='sum_vector.m', model_object_name="matlab", - output_script='process_matlab_output.py', output_object_name='read_output', - resume=False, model_dir='Matlab_Model', fmt="{:>10.4f}") + model = ThirdPartyModel(model_script='matlab_model_sum_vector.py', + input_template='sum_vector.m', model_object_name="matlab", + output_script='process_matlab_output.py', output_object_name='read_output', + model_dir='Matlab_Model', fmt="{:>10.4f}", var_names=['x0']) + m = RunModel(samples=x_mcs_tri, ntasks=1, model=model) t_ser_matlab = time.time() - t print("\nTime for serial execution:") print(t_ser_matlab) @@ -296,7 +295,6 @@ print("The values returned from the Matlab simulation:") print(m.qoi_list) - # %% md # # Example 3: Passing a scalar and an array to RunModel @@ -388,10 +386,11 @@ # case, x0 is a scalar and x1 is a 3x3 matrix. We will read the matrix in without indexing in the # input_template. t = time.time() - m = RunModel(samples=x_mixed_array, ntasks=1, model_script='matlab_model_det.py', - input_template='prod_determinant.m', model_object_name="matlab", - output_script='process_matlab_output.py', output_object_name='read_output', - resume=False, model_dir='Matlab_Model', fmt="{:>10.4f}") + model = ThirdPartyModel(model_script='matlab_model_det.py', + input_template='prod_determinant.m', model_object_name="matlab", var_names=['x0', 'x1'], + output_script='process_matlab_output.py', output_object_name='read_output', + model_dir='Matlab_Model', fmt="{:>10.4f}") + m = RunModel(samples=x_mixed_array, ntasks=1, model=model) t_ser_matlab = time.time() - t print("\nTime for serial execution:") print(t_ser_matlab) @@ -412,14 +411,13 @@ if pick_model == 'mixed' or pick_model == 'all': # Call to RunModel - Here we run the model while instantiating the RunModel object. - # Notice that we do not specify var_names. This will default to two variables with names x0 and x1. In this - # case, x0 is a scalar and x1 is a 3x3 matrix. We will read the matrix in with indexing in the - # input_template. + # We will read the matrix in with indexing in the input_template. 
t = time.time()
-    m = RunModel(samples=x_mixed_array, ntasks=1, model_script='matlab_model_det_index.py',
-                 input_template='prod_determinant_index.m', model_object_name="matlab",
-                 output_script='process_matlab_output.py', output_object_name='read_output',
-                 resume=False, model_dir='Matlab_Model', fmt="{:>10.4f}")
+    model = ThirdPartyModel(model_script='matlab_model_det_index.py',
+                            input_template='prod_determinant_index.m', model_object_name="matlab",
+                            output_script='process_matlab_output.py', output_object_name='read_output',
+                            model_dir='Matlab_Model', fmt="{:>10.4f}", var_names=['x0', 'x1'])
+    m = RunModel(samples=x_mixed_array, ntasks=1, model=model)
     t_ser_matlab = time.time() - t
     print("\nTime for serial execution:")
     print(t_ser_matlab)
@@ -442,14 +440,12 @@
 if pick_model == 'mixed' or pick_model == 'all':
     # Call to RunModel - Here we run the model while instantiating the RunModel object.
-    # Notice that we do not specify var_names. This will default to two variables with names x0 and x1. In this
-    # case, x0 is a scalar and x1 is a 3x3 matrix. We will read the matrix in without indexing in the
-    # input_template.
     t = time.time()
-    m = RunModel(samples=x_mixed, ntasks=2, model_script='matlab_model_det.py',
-                 input_template='prod_determinant.m', model_object_name="matlab",
-                 output_script='process_matlab_output.py', output_object_name='read_output',
-                 resume=False, model_dir='Matlab_Model')
+    model = ThirdPartyModel(model_script='matlab_model_det.py',
+                            input_template='prod_determinant.m', model_object_name="matlab",
+                            output_script='process_matlab_output.py', output_object_name='read_output',
+                            model_dir='Matlab_Model', var_names=['x0', 'x1'])
+    m = RunModel(samples=x_mixed, ntasks=2, model=model)
     t_ser_matlab = time.time() - t
     print("\nTime for serial execution:")
     print(t_ser_matlab)
@@ -477,10 +473,11 @@
 if pick_model == 'mixed' or pick_model == 'all':
     # Call to RunModel - Here we run the model while instantiating the RunModel object.
     # Here var_names=['x0', 'x1'] labels the two inputs explicitly. In this
     # case, x0 is a scalar and x1 is a 3x3 matrix. We will read the matrix in with indexing in the
     # input_template.
     t = time.time()
-    m = RunModel(samples=x_mixed, ntasks=2, model_script='matlab_model_det_index.py',
-                 input_template='prod_determinant_index.m', model_object_name="matlab",
-                 output_script='process_matlab_output.py', output_object_name='read_output',
-                 resume=False, model_dir='Matlab_Model')
+    model = ThirdPartyModel(model_script='matlab_model_det_index.py',
+                            input_template='prod_determinant_index.m', model_object_name="matlab",
+                            output_script='process_matlab_output.py', output_object_name='read_output',
+                            model_dir='Matlab_Model', var_names=['x0', 'x1'])
+    m = RunModel(samples=x_mixed, ntasks=2, model=model)
     t_ser_matlab = time.time() - t
     print("\nTime for serial execution:")
     print(t_ser_matlab)
@@ -508,13 +505,14 @@
 if pick_model == 'mixed' or pick_model == 'all':
     # Call to RunModel - Here we run the model while instantiating the RunModel object.
     # Here var_names=['x0', 'x1'] labels the two inputs explicitly. In this
     # case, x0 is a scalar and x1 is a 3x3 matrix. We will read the matrix in with indexing in the
     # input_template.
     t = time.time()
-    m = RunModel(samples=x_mixed_array, ntasks=1, model_script='matlab_model_det_partial.py',
-                 input_template='prod_determinant_partial.m', model_object_name="matlab",
-                 output_script='process_matlab_output.py', output_object_name='read_output',
-                 resume=False, model_dir='Matlab_Model', fmt="{:>10.4f}")
+    model = ThirdPartyModel(model_script='matlab_model_det_partial.py',
+                            input_template='prod_determinant_partial.m', model_object_name="matlab",
+                            output_script='process_matlab_output.py', output_object_name='read_output',
+                            model_dir='Matlab_Model', fmt="{:>10.4f}", var_names=['x0', 'x1'])
+    m = RunModel(samples=x_mixed_array, ntasks=1, model=model)
     t_ser_matlab = time.time() - t
     print("\nTime for serial execution:")
     print(t_ser_matlab)
     print()
     print("The values returned from the Matlab simulation:")
-    print(m.qoi_list) \ No newline at end of file
+    print(m.qoi_list)
diff --git a/docs/code/RunModel/opensees_example.py b/docs/code/RunModel/opensees_example.py
index 21af0b8cc..c0870cbcf 100644
--- a/docs/code/RunModel/opensees_example.py
+++ b/docs/code/RunModel/opensees_example.py
@@ -4,7 +4,6 @@
 ==================================

 """
-
 # %% md
 #
 # Import the necessary libraries. 
@@ -14,6 +13,7 @@ from UQpy.distributions import Uniform from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.ThirdPartyModel import ThirdPartyModel from UQpy.sampling import MonteCarloSampling # %% md @@ -41,12 +41,13 @@ # %% -names_ = ['fc1', 'fy1', 'Es1', 'fc2', 'fy2', 'Es2', 'fc3', 'fy3', 'Es3', 'fc4', 'fy4', 'Es4', 'fc5', 'fy5', 'Es5', +names_ = ['fc1', 'fy1', 'Es1', 'fc2', 'fy2', 'Es2', 'fc3', 'fy3', 'Es3', 'fc4', 'fy4', 'Es4', 'fc5', 'fy5', 'Es5', 'fc6', 'fy6', 'Es6'] -opensees_rc6_model = RunModel(samples=samples, ntasks=5, model_script='opensees_model.py', - input_template='import_variables.tcl', var_names=names_, model_object_name="opensees_run", - output_script='process_opensees_output.py', output_object_name='read_output') +m = ThirdPartyModel(model_script='opensees_model.py', input_template='import_variables.tcl', var_names=names_, + model_object_name="opensees_run", output_script='process_opensees_output.py', + output_object_name='read_output') +opensees_rc6_model = RunModel(samples=samples, ntasks=5, model=m) outputs = opensees_rc6_model.qoi_list print(outputs) diff --git a/docs/code/RunModel/python_example.py b/docs/code/RunModel/python_example.py index 55ae35916..a1b522847 100644 --- a/docs/code/RunModel/python_example.py +++ b/docs/code/RunModel/python_example.py @@ -70,6 +70,7 @@ from UQpy.sampling import MonteCarloSampling from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Normal import time import numpy as np @@ -139,7 +140,8 @@ if pick_model in {'scalar', 'all'}: # Call to RunModel - Here we run the model while instantiating the RunModel object. t = time.time() - m11 = RunModel(ntasks=1, model_script='python_model.py', model_object_name='SumRVs', model_dir='Python_Runs', verbose=True) + m = PythonModel(model_script='python_model.py', model_object_name='SumRVs') + m11 = RunModel(model=m, ntasks=1 ) m11.run(samples=x_mcs.samples,) t_ser_python = time.time() - t print("\nTime for serial execution:") @@ -160,8 +162,8 @@ if pick_model in {'scalar', 'all'}: # Call to RunModel - Here we run the model while instantiating the RunModel object. t = time.time() - m12 = RunModel(samples=x_mcs_list, ntasks=2, model_script='python_model.py', - model_object_name='sum_rvs', model_dir='Python_Runs') + m = PythonModel(model_script='python_model.py', model_object_name='sum_rvs') + m12 = RunModel(model=m, samples=x_mcs_list, ntasks=2) t_par_python = time.time() - t print("\nTime for parallel execution:") print(t_par_python) @@ -222,8 +224,8 @@ if pick_model in {'vector', 'all'}: # Call to RunModel - Here we run the model while instantiating the RunModel object. t = time.time() - m21 = RunModel(samples=x_mcs_tri, ntasks=1, model_script='python_model.py', - model_object_name='sum_rvs_vec', model_dir='Python_Runs') + m=PythonModel(model_script='python_model.py', model_object_name='sum_rvs_vec') + m21 = RunModel(samples=x_mcs_tri, ntasks=1, model=m) t_ser_python = time.time() - t print("\nTime for serial execution:") print(t_ser_python) @@ -243,8 +245,8 @@ if pick_model == 'vector' or pick_model == 'all': # Call to RunModel - Here we run the model while instantiating the RunModel object. 
t = time.time() - m22 = RunModel(samples=x_mcs_tri_list, ntasks=2, model_script='python_model.py', - model_object_name='SumRVs', model_dir='Python_Runs') + m=PythonModel(model_script='python_model.py', model_object_name='SumRVs') + m22 = RunModel(samples=x_mcs_tri_list, ntasks=2, model=m) t_par_python = time.time() - t print("\nTime for parallel execution:") print(t_par_python) @@ -337,8 +339,8 @@ if pick_model == 'mixed' or pick_model == 'all': # Call to RunModel - Here we run the model while instantiating the RunModel object. t = time.time() - m31 = RunModel(samples=x_mixed_array, ntasks=1, model_script='python_model.py', - model_object_name='DetRVs', model_dir='Python_Runs', vec=False) + m=PythonModel(model_script='python_model.py', model_object_name='DetRVs') + m31 = RunModel(samples=x_mixed_array, ntasks=1, model=m) t_ser_python = time.time() - t print("\nTime for serial execution:") print(t_ser_python) @@ -359,8 +361,8 @@ # Call to RunModel - Here we run the model while instantiating the RunModel object. # Note that the parallel model_object handles only one sample at a time. t = time.time() - m32 = RunModel(samples=x_mixed, ntasks=1, model_script='python_model.py', - model_object_name='det_rvs_par', model_dir='Python_Runs', vec=False) + m=PythonModel(model_script='python_model.py', model_object_name='det_rvs_par') + m32 = RunModel(samples=x_mixed, ntasks=1, model=m) t_par_python = time.time() - t print("\nTime for parallel execution:") print(t_par_python) @@ -419,8 +421,8 @@ if pick_model == 'mixed' or pick_model == 'all': # Call to RunModel - Here we run the model while instantiating the RunModel object. t = time.time() - m41 = RunModel(samples=x_mcs_array, ntasks=1, model_script='python_model.py', - model_object_name='det_rvs_fixed', model_dir='Python_Runs', vec=False, coeff=x) + m=PythonModel(model_script='python_model.py', model_object_name='det_rvs_fixed') + m41 = RunModel(samples=x_mcs_array, ntasks=1, model=m, coeff=x) t_ser_python = time.time() - t print("\nTime for serial execution:") print(t_ser_python) @@ -438,8 +440,8 @@ if pick_model == 'mixed' or pick_model == 'all': # Call to RunModel - Here we run the model while instantiating the RunModel object. t = time.time() - m42 = RunModel(samples=x_mcs_list, ntasks=1, model_script='python_model.py', - model_object_name='det_rvs_fixed', model_dir='Python_Runs', vec=False, coeff=x) + m=PythonModel(model_script='python_model.py', model_object_name='det_rvs_fixed') + m42 = RunModel(samples=x_mcs_list, ntasks=1, model=m, coeff=x) t_ser_python = time.time() - t print("\nTime for serial execution:") print(t_ser_python) diff --git a/docs/source/runmodel_doc.rst b/docs/source/runmodel_doc.rst index d8aff5c91..dd094dcb1 100644 --- a/docs/source/runmodel_doc.rst +++ b/docs/source/runmodel_doc.rst @@ -25,8 +25,7 @@ for execution of either a Python computational model, in which case the model is execution of a third-party software model. When running with a third-party software model, :class:`RunModel` interfaces with the model through text-based input files and serves as the "driver" to initiate the necessary calculations. At the second level, the jobs that are run by :class:`.RunModel` can either be executed -in series or in parallel. Within the third-party model parallel execution workflow, there are two cases, which are -triggered by the ``cluster`` variable. In the following sections the workflow is discussed in detail. +in series or in parallel. In the following sections the workflow is discussed in detail. .. 
image:: _static/Runmodel_workflow.png
   :width: 600

 Python Model Workflow: Serial Execution
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

 A common workflow in :py:mod:`UQpy` is when the computational model being evaluated is written in Python. This workflow is
-invoked by calling :class:`.RunModel` without specifying an ``input_template`` (i.e. ``input_template = None``) and setting
+invoked by calling :class:`.RunModel` with a :class:`.PythonModel` object (no ``input_template`` is needed) and setting
 ``model_script`` to the user-defined Python script containing the model. This Python model is run serially by setting ``ntasks = 1``.

 Details for ``model_script`` can be found in the Section entitled `Files & Scripts Used by RunModel`_.

@@ -53,8 +52,10 @@ Python Model Workflow: Parallel Execution
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

 The Python model is executed in parallel by setting ``ntasks`` equal to the desired number of tasks (greater than 1) to be executed concurrently. In this
-case, the ``model_script`` and corresponding ``model_object`` should be defined to accept a single sample. :class:`.RunModel` uses the ``multiprocessing`` library for
-parallel execution of python models, which restricts parallelization to the cores available within a single computer. A workaround to this, to run in parallel across multiple compute nodes, is to treat the python model as a third-party model and run with the third-party parallel execution workflow discussed below.
+case, the ``model_script`` and corresponding ``model_object`` should be defined to accept a single sample. :class:`.RunModel` uses the ``mpi4py`` library for
+parallel execution of Python models. The OpenMPI library is required by ``mpi4py``
+and must be installed on the computer running the model. Instructions for installing OpenMPI are provided
+at `https://www.open-mpi.org/faq/?category=building `_.

 Details for ``model_script`` can be found in the Section entitled `Files & Scripts Used by RunModel`_.

@@ -88,23 +89,22 @@ This workflow operates in three steps as explained in the following:
 Third-Party Model Workflow: Parallel Execution
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-Parallel execution in :class:`.RunModel` is carried out by the `GNU parallel` library :cite:`GNU_parallel`. GNU parallel is essential
-and must be installed on the computer running the model. Information regarding how to install GNU parallel is provided
-at `https://www.gnu.org/software/parallel `_. Parallel execution is activated in
+Parallel execution in :class:`.RunModel` is carried out by the ``mpi4py`` library. The OpenMPI library is required by ``mpi4py``
+and must be installed on the computer running the model. Instructions for installing OpenMPI are provided
+at `https://www.open-mpi.org/faq/?category=building `_. Parallel execution is activated in
 :class:`.RunModel` by setting the parameter ``ntasks>1``. The key differences in terms of the workflow are listed below.

-1. During parallel execution, all required input files are generated prior to model execution as opposed to serial execution where input files are generated
+1. During parallel execution, the execution of the different samples is distributed among the tasks, and the required input files are generated
    individually prior to each run.
-2. `GNU parallel` divides the total number of jobs into a number of chunks specified by the variable ``ntasks``.
+2. `OpenMPI` divides the total number of jobs into a number of chunks specified by the variable ``ntasks``. 
``ntasks`` number of jobs are executed in parallel and
    this continues until all the jobs finish executing. The resources used per job are specified by setting ``cores_per_task`` and ``nodes`` appropriately; details can be found in the :class:`.RunModel` class description.

 Whether in serial or parallel, the sample index is used by :class:`.RunModel` to keep track of model execution and to link the samples to their corresponding outputs. :class:`.RunModel` achieves this by consistently naming all the input files using the sample index (see Step 1) and passing the sample index into ``model_script``. More details on the precise structure of ``model_script`` are discussed in the Section entitled `Files & Scripts Used by RunModel`_.

3. Output processing in the parallel case is performed after every individual run.

 Directory Structure During Third-Party Model Evaluation
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

@@ -165,18 +165,18 @@ Python model may be structured are provided below.

 .. code-block:: python

     class ModelClass:
-    def __init__(self, input=one_sample, **kwargs):
-        # Execute the model using the input and get the output
-        self.qoi = output
+        def __init__(self, input=one_sample, **kwargs):
+            # Execute the model using the input and get the output
+            self.qoi = output

 **Example:** Model object as a function:

 .. code-block:: python

     def model_function(input=one_sample, **kwargs):
-    # Execute the model using the input and get the output
-    return output
-
+        # Execute the model using the input and get the output
+        return output
+

 * *Third-Party Software Model:*

 When running a third-party model, :class:`.RunModel` does not import ``model_script``.
 Instead, :class:`.RunModel` calls the model script through the command line. An example
 driver script for a Matlab model is shown below.

 ``matlab_model_script.py``

 .. code-block:: python

     import os
     import fire

     def model(sample_index):
         # Copy the input file for this sample into the cwd
         command1 = "cp ./InputFiles/matlab_model_" + str(sample_index + 1) + ".m ."
         # Run the Matlab model
         command2 = ("matlab -nosplash -nojvm -nodisplay -nodesktop -r "
                     "'run matlab_model_" + str(sample_index + 1) + ".m; exit'")
         # Rename the output file with the sample index
         command3 = "mv y.txt y_" + str(sample_index + 1) + ".txt"

         os.system(command1)
         os.system(command2)
         os.system(command3)

     if __name__ == '__main__':
         fire.Fire(model)

@@ -276,19 +276,19 @@ the quantity of interest after execution. Examples for how the output object may

 .. code-block:: python

     class OutputClass:
-    def __init__(self, input=sample_index):
-        # Postprocess the output files corresponding to the
-        # sample number and extract the quantity of interest
-        self.qoi = output
+        def __init__(self, input=sample_index):
+            # Postprocess the output files corresponding to the
+            # sample number and extract the quantity of interest
+            self.qoi = output

 **Example:** Output object as a function:

 .. 
code-block:: python def output_function(input=sample_index): - # Postprocess the output files corresponding to the sample - # number and extract the quantity of interest - return output + # Postprocess the output files corresponding to the sample + # number and extract the quantity of interest + return output **Executable Software** @@ -313,11 +313,23 @@ Several simple mathematical python models are provided in a Jupyter script entit models in python using :class:`.RunModel`. The notebook executes models in serial and in parallel. The models themselves are provided in the python file `python_model.py`. +PythonModel Class +------------------- + +.. autoclass:: UQpy.run_model.model_execution.PythonModel + :members: + **Third-Party Models** :class:`.RunModel` can be used to execute nearly any third-party model. In the `example` folder, we provide files for the execution of several commonly-used engineering software packages. +ThirdPartyModel Class +----------------------- + +.. autoclass:: UQpy.run_model.model_execution.ThirdPartyModel + :members: + *Abaqus Model* Code is provided for execution of 100 Monte Carlo samples of two random variables for the analysis of a beam subject to @@ -399,23 +411,11 @@ The other necessary files are the following: Note that this example is not intended to represent the accurate pushover analysis a real structure. It is for :py:mod:`UQpy` illustration purposes only. -PythonModel Class -------------------- - -.. autoclass:: UQpy.run_model.model_execution.PythonModel - :members: - -ThirdPartyModel Class ------------------------ - -.. autoclass:: UQpy.run_model.model_execution.ThirdPartyModel - :members: - RunModel Class -------------- .. autoclass:: UQpy.run_model.RunModel - :members: + :members: diff --git a/src/UQpy/run_model/model_execution/ThirdPartyModel.py b/src/UQpy/run_model/model_execution/ThirdPartyModel.py index 85501b7e9..9b5c7b97e 100644 --- a/src/UQpy/run_model/model_execution/ThirdPartyModel.py +++ b/src/UQpy/run_model/model_execution/ThirdPartyModel.py @@ -13,8 +13,8 @@ class ThirdPartyModel: - def __init__(self, var_names: list[str], input_template: str, model_script: str, model_object_name: str, - output_script: str, output_object_name: str, fmt: str = None, separator: str = ', ', + def __init__(self, var_names: list[str], input_template: str, model_script: str, output_script: str = None, + model_object_name: str = None, output_object_name: str = None, fmt: str = None, separator: str = ', ', delete_files: bool = False, model_dir: str = "Model_Runs"): """ From 92a724c1efd93d2af33233fd5491afc3b773ab21 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Sun, 8 May 2022 23:48:32 +0200 Subject: [PATCH 18/88] Added Chatterjee sensitivity index --- src/UQpy/sensitivity/__init__.py | 2 + src/UQpy/sensitivity/chatterjee.py | 448 +++++++++++++++++++++++++++++ 2 files changed, 450 insertions(+) create mode 100644 src/UQpy/sensitivity/chatterjee.py diff --git a/src/UQpy/sensitivity/__init__.py b/src/UQpy/sensitivity/__init__.py index 10ca1565d..73c4166da 100644 --- a/src/UQpy/sensitivity/__init__.py +++ b/src/UQpy/sensitivity/__init__.py @@ -2,8 +2,10 @@ from UQpy.sensitivity.PceSensitivity import PceSensitivity from UQpy.sensitivity.sobol import Sobol from UQpy.sensitivity.cramer_von_mises import CramervonMises +from UQpy.sensitivity.chatterjee import Chatterjee from . import MorrisSensitivity from . import PceSensitivity from . import Sobol from . import CramervonMises +from . 
import Chatterjee diff --git a/src/UQpy/sensitivity/chatterjee.py b/src/UQpy/sensitivity/chatterjee.py new file mode 100644 index 000000000..9bea38cdb --- /dev/null +++ b/src/UQpy/sensitivity/chatterjee.py @@ -0,0 +1,448 @@ +""" +This module contains the Chatterjee coefficient of correlation proposed +in [1]_. + +Using the rank statistics, we can also estimate the Sobol indices proposed by +Gamboa et al. [2]_. + +References +---------- + +.. [1] Sourav Chatterjee (2021) A New Coefficient of Correlation, Journal of the + American Statistical Association, 116:536, 2009-2022, + DOI: 10.1080/01621459.2020.1758115 + +.. [2] Fabrice Gamboa, Pierre Gremaud, Thierry Klein, and AgnĆØs Lagnoux. (2020). + Global Sensitivity Analysis: a new generation of mighty estimators + based on rank statistics. + +""" + +import logging + +import numpy as np +import scipy.stats + +from UQpy.sensitivity.baseclass.sensitivity import Sensitivity +from UQpy.sensitivity.sobol import compute_first_order as compute_first_order_sobol +from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter + + +class Chatterjee(Sensitivity): + """ + Compute sensitivity indices using the Chatterjee correlation coefficient. + + Using the same model evaluations, we can also estimate the Sobol indices. + + :param runmodel_object: The computational model. It should be of type :class:`.RunModel`. \ + The output QoI can be a scalar or vector of length :code:`ny`, then the sensitivity \ + indices of all :code:`ny` outputs are computed independently. + + :param distributions: List of :class:`.Distribution` objects corresponding to each \ + random variable, or :class:`.JointIndependent` object \ + (multivariate RV with independent marginals). + + :param random_state: Random seed used to initialize the pseudo-random number \ + generator. Default is :any:`None`. + + **Methods:** + """ + + def __init__(self, runmodel_object, dist_object, random_state=None, **kwargs): + super().__init__( + runmodel_object, dist_object, random_state=random_state, **kwargs + ) + + # Create logger with the same name as the class + self.logger = logging.getLogger(__name__) + self.logger.setLevel(logging.ERROR) + frmt = UQpyLoggingFormatter() + + # create console handler with a higher log level + ch = logging.StreamHandler() + ch.setFormatter(frmt) + + # add the handler to the logger + self.logger.addHandler(ch) + + self.chatterjee_i = None + "Chatterjee sensitivity indices (First order), :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`" + + self.sobol_i = None + "Sobol indices computed using the rank statistics, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`" + + self.CI_chatterjee_i = None + "Confidence intervals for the Chatterjee sensitivity indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 2)`" + + self.num_vars = None + "Number of input random variables, :class:`int`" + + self.n_samples = None + "Number of samples used to estimate the sensitivity indices, :class:`int`" + + def run( + self, + n_samples=1_000, + estimate_sobol_indices=False, + num_bootstrap_samples=None, + confidence_level=0.95, + ): + """ + Compute the sensitivity indices using the Chatterjee method. + + :param n_samples: Number of samples used to compute the CramĆ©r-von Mises indices. \ + Default is 1,000. + + :param estimate_sobol_indices: If :code:`True`, the Sobol indices are estimated \ + using the pick-and-freeze samples. + + :param num_bootstrap_samples: Number of bootstrap samples used to estimate the \ + Sobol indices. Default is :any:`None`. 
+ + :param confidence_level: Confidence level used to compute the confidence \ + intervals of the CramĆ©r-von Mises indices. + + :return: A :class:`dict` with the following keys: \ + :code:`'chatterjee_i'` of shape :code:`(num_vars, 1)`, \ + :code:`'CI_chatterjee_i'` of shape :code:`(num_vars, 2)`, \ + :code:`'sobol_i'` of shape :code:`(num_vars, 1)`. + + """ + + # Check nsamples + self.n_samples = n_samples + if not isinstance(self.n_samples, int): + raise TypeError("UQpy: nsamples should be an integer") + + # Check num_bootstrap_samples data type + if num_bootstrap_samples is not None: + if not isinstance(num_bootstrap_samples, int): + raise TypeError("UQpy: num_bootstrap_samples should be an integer.\n") + elif num_bootstrap_samples is None: + self.logger.info( + "UQpy: num_bootstrap_samples is set to None, confidence intervals will not be computed.\n" + ) + + ################## GENERATE SAMPLES ################## + + A_samples = self.dist_object.rvs(self.n_samples, random_state=self.random_state) + + self.logger.info("UQpy: Generated samples successfully.\n") + + self.num_vars = A_samples.shape[1] # number of variables + + ################# MODEL EVALUATIONS #################### + + A_model_evals = self._run_model(A_samples).reshape(-1, 1) + + self.logger.info("UQpy: Model evaluations completed.\n") + + ######################### STORAGE ######################## + # Create dictionary to store the sensitivity indices + computed_indices = {} + + ################## COMPUTE CHATTERJEE INDICES ################## + + self.chatterjee_i = self.compute_chatterjee_indices(A_samples, A_model_evals) + + self.logger.info("UQpy: Chatterjee indices computed successfully.\n") + + # Store the indices in the dictionary + computed_indices["chatterjee_i"] = self.chatterjee_i + + ################## COMPUTE SOBOL INDICES ################## + + self.logger.info("UQpy: Computing First order Sobol indices ...\n") + + if estimate_sobol_indices: + f_C_i_model_evals = self.compute_rank_analog_of_f_C_i( + A_samples, A_model_evals + ) + + self.sobol_i = self.compute_Sobol_indices(A_model_evals, f_C_i_model_evals) + + self.logger.info("UQpy: First order Sobol indices computed successfully.\n") + + # Store the indices in the dictionary + computed_indices["sobol_i"] = self.sobol_i + + ################## CONFIDENCE INTERVALS #################### + + if num_bootstrap_samples is not None: + + self.logger.info("UQpy: Computing confidence intervals ...\n") + + estimator_inputs = [A_samples, A_model_evals] + + self.CI_chatterjee_i = self.bootstrapping( + self.compute_chatterjee_indices, + estimator_inputs, + computed_indices["chatterjee_i"], + num_bootstrap_samples, + confidence_level, + ) + + self.logger.info( + "UQpy: Confidence intervals for Chatterjee indices computed successfully.\n" + ) + + computed_indices["CI_chatterjee_i"] = self.CI_chatterjee_i + + return computed_indices + + @staticmethod + def compute_chatterjee_indices(X, Y, seed=None): + r""" + + Compute the Chatterjee sensitivity indices + between the input random vectors :math:`X=\left[ X_{1}, X_{2},ā€¦,X_{d} \right]` + and output random vector Y. + + :param X: Input random vectors, :class:`numpy.ndarray` of shape :code:`(n_samples, num_vars)` + + :param Y: Output random vector, :class:`numpy.ndarray` of shape :code:`(n_samples, 1)` + + :param seed: Seed for the random number generator. 
+ + :return: Chatterjee sensitivity indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)` + + """ + + if seed is not None: + # set seed for reproducibility + np.random.seed(seed) + + N = X.shape[0] # number of samples + m = X.shape[1] # number of variables + + chatterjee_indices = np.zeros((m, 1)) + + for i in range(m): + + # Samples of random variable X_i + X_i = X[:, i].reshape(-1, 1) + + #! For ties in X_i + # we break ties uniformly at random + # Shuffle X_i and Y + _ix = np.arange(N) # indices of X_i + np.random.shuffle(_ix) # shuffle indices + X_i_shuffled = X_i[_ix] # shuffle X_i + Y_shuffled = Y[_ix] # shuffle Y + + Z = np.hstack((X_i_shuffled, Y_shuffled)) + + # Sort the columns of Z by X_i + # such that the tuple (X_i, Y_i) is unchanged + Z_sorted = Z[Z[:, 0].argsort()] + + # Find rank of y_i in the sorted columns of Y + # r[i] is number of j s.t. y[j] <= y[i], + # This is accomplished using rankdata with method='max' + # Example: Y = [1, 2, 3, 3, 4, 5], rank = [1, 2, 4, 4, 5, 6] + rank = scipy.stats.rankdata(Z_sorted[:, 1], method="max") + + #! For ties in Y + # l[i] is number of j s.t. y[i] <= y[j], + # This is accomplished using rankdata with method='max' + # Example: Y = [1, 2, 3, 3, 4, 5], l = [6, 5, 4, 4, 2, 1] + # One could also use the Y_shuffled array, since sum2 only + # multiplies terms of same index, i.e l_i*(n - l_i) + L = scipy.stats.rankdata(-Z_sorted[:, 1], method="max") + + sum1 = np.abs(rank[1:] - rank[:-1]).sum() + + sum2 = np.sum(L * (N - L)) + + chatterjee_indices[i] = 1 - N * sum1 / (2 * sum2) + + return chatterjee_indices + + @staticmethod + def rank_analog_to_pickfreeze(X, j): + r""" + Computing the :math:`N(j)` for each :math:`j \in \{1, \ldots, n\}` + as in eq.(8) in [6]_, where :math:`n` is the size of :math:`X`. + + .. math:: + :nowrap: + + \begin{equation} + N(j):= + \begin{cases} + \pi^{-1}(\pi(j)+1) &\text { if } \pi(j)+1 \leqslant n \\ + \pi^{-1}(1) &\text { if } \pi(j)=n + \end{cases} + \end{equation} + + where, :math:`\pi(j) := \mathrm{rank}(x_j)` + + :param X: Input random vector, :class:`numpy.ndarray` of shape :code:`(n_samples, 1)` + + :param j: Index of the sample :math:`j \in \{1, \ldots, n\}` + + :return: :math:`N(j)` :class:`int` + + """ + + N = X.shape[0] # number of samples + + # Ranks of elements of X_i + # -1 so that the ranks are 0-based + # for convenience in indexing + rank_X = scipy.stats.rankdata(X) - 1 + rank_X = rank_X.astype(int) + + # Find rank of element j + rank_j = rank_X[j] + + if rank_j + 1 <= N - 1: + # Get index of element: rank_j + 1 + return np.where(rank_X == rank_j + 1)[0][0] + + if rank_j == N - 1: + return np.where(rank_X == 0)[0][0] + + @staticmethod + def rank_analog_to_pickfreeze_vec(X): + r""" + Computing the :math:`N(j)` for each :math:`j \in \{1, \ldots, n\}` + in a vectorized manner., where :math:`n` is the size of :math:`X`. + + This method is significantly faster than the looping version + ``rank_analog_to_pickfreeze`` but is also more complicated. + + .. 
math:: + :nowrap: + + \begin{equation} + N(j):= + \begin{cases} + \pi^{-1}(\pi(j)+1) &\text { if } \pi(j)+1 \leqslant n \\ + \pi^{-1}(1) &\text { if } \pi(j)=n + \end{cases} + \end{equation} + + where, :math:`\pi(j) := \mathrm{rank}(x_j)` + + Key idea: :math:`\pi^{-1}` is rank_X.argsort() ( + `see also `_) + + Example: + X = [22, 74, 44, 11, 1] + + N_J = [3, 5, 2, 1, 4] (1-based indexing) + + N_J = [2, 4, 1, 0, 3] (0-based indexing) + + :param X: Input random vector, :class:`numpy.ndarray` of shape :code:`(n_samples, 1)` + + :return: :math:`N(j)`, :class:`numpy.ndarray` of shape :code:`(n_samples, 1)` + + """ + + N = X.shape[0] # number of samples + N_func = np.zeros((N, 1)) + + # Ranks of elements of X_i + # -1 since ranks are 0-based + rank_X = scipy.stats.rankdata(X, method="ordinal") - 1 + rank_X = rank_X.astype(int) + + # Inverse of pi(j): j = pi^-1(rank_X(j)) + #! This is non-trivial + pi_inverse = rank_X.argsort() # complexity: N*log(N) + + # CONDITION 2 + # Find j with rank_j == N-1 + j_meets_condition_2 = pi_inverse[N - 1] + N_func[j_meets_condition_2] = pi_inverse[0] + + # CONDITION 1 + # Find j's with rank_j + 1 <= N-1 + # term_1 = pi(j) + 1 + j_remaining = np.delete(np.arange(N), j_meets_condition_2) + term_1 = rank_X[j_remaining] + 1 + + j_remaining_meet_condition_1 = pi_inverse[term_1] + + # j_remaining_meet_condition_1 = np.where(rank_X_i == condition) + N_func[j_remaining, 0] = j_remaining_meet_condition_1 + + return N_func.astype(int) + + @staticmethod + def compute_Sobol_indices(A_model_evals, C_i_model_evals): + r""" + A method to estimate the first order Sobol indices using + the Chatterjee method. + + .. math:: + :nowrap: + + \begin{equation} + \xi_{n}^{\mathrm{Sobol}}\left(X_{1}, Y\right):= + \frac{\frac{1}{n} \sum_{j=1}^{n} Y_{j} Y_{N(j)}-\left(\frac{1}{n} \sum_{j=1}^{n} Y_{j}\right)^{2}} + {\frac{1}{n} \sum_{j=1}^{n}\left(Y_{j}\right)^{2}-\left(\frac{1}{n} \sum_{j=1}^{n} Y_{j}\right)^{2}} + \end{equation} + + where the term :math:`Y_{N(j)}` is computed using the method:``rank_analog_to_pickfreeze_vec``. + + :param A_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_samples, 1)` + + :param C_i_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_samples, num_vars)` + + :return: First order Sobol indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)` + + """ + + # extract shape + _shape = C_i_model_evals.shape + + # convert C_i_model_evals to 3D array + # with n_outputs=1 in first dimension + n_outputs = 1 + C_i_model_evals = C_i_model_evals.reshape((n_outputs, *_shape)) + + first_order_sobol = compute_first_order_sobol( + A_model_evals, None, C_i_model_evals, scheme="Sobol1993" + ) + + return first_order_sobol + + def compute_rank_analog_of_f_C_i(self, A_samples, A_model_evals): + r""" + In the Pick and Freeze method, we use model evaluations + :math:`f_A`, :math:`f_B`, :math:`f_{C_{i}}` + to compute the Sobol indices. + + Gamboa et al. provide a rank analog to :math:`f_{C_{i}}` in eq. (6) in [6]_. + + **Inputs:** + + * **A_samples** (`ndarray`): + Shape: `(n_samples, num_vars)`. + + * **A_model_evals** (`ndarray`): + Shape: `(n_samples, 1)`. + + **Outputs:** + + * **A_i_model_evals** (`ndarray`): + Shape: `(n_samples, num_vars)`. 
+ + """ + + f_A = A_model_evals + N = f_A.shape[0] + m = self.num_vars + + A_i_model_evals = np.zeros((N, m)) + + for i in range(m): + + K = self.rank_analog_to_pickfreeze_vec(A_samples[:, i]) + + A_i_model_evals[:, i] = f_A[K].ravel() + + return A_i_model_evals From c86aea248903b03f72c0b918556d911b13ab3f41 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Sun, 8 May 2022 23:48:44 +0200 Subject: [PATCH 19/88] Minor docstring fix --- src/UQpy/sensitivity/cramer_von_mises.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/UQpy/sensitivity/cramer_von_mises.py b/src/UQpy/sensitivity/cramer_von_mises.py index 229a8b761..66421b440 100644 --- a/src/UQpy/sensitivity/cramer_von_mises.py +++ b/src/UQpy/sensitivity/cramer_von_mises.py @@ -83,7 +83,7 @@ def __init__( "Number of samples used to compute the CramĆ©r-von Mises indices, :class:`int`" self.num_vars = None - "Number of random variables, :class:`int`" + "Number of input random variables, :class:`int`" def run( self, From 0b5a6662be6e673ff2ffd34807f1cb8afa3f5f53 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Sun, 8 May 2022 23:49:46 +0200 Subject: [PATCH 20/88] Added documentation Chatterjee sensitivity --- docs/code/sensitivity/chatterjee/README.rst | 15 ++++++++ docs/source/conf.py | 2 + docs/source/sensitivity/chatterjee.rst | 41 +++++++++++++++++++++ docs/source/sensitivity/index.rst | 2 + 4 files changed, 60 insertions(+) create mode 100644 docs/code/sensitivity/chatterjee/README.rst create mode 100644 docs/source/sensitivity/chatterjee.rst diff --git a/docs/code/sensitivity/chatterjee/README.rst b/docs/code/sensitivity/chatterjee/README.rst new file mode 100644 index 000000000..590eee2a7 --- /dev/null +++ b/docs/code/sensitivity/chatterjee/README.rst @@ -0,0 +1,15 @@ +Chatterjee indices +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +These examples serve as a guide for using the Chatterjee sensitivity module. They have been taken from various papers to enable validation of the implementation and have been referenced accordingly. + +1. Ishigami function + +2. Exponential function + +For the Exponential model, analytical Cramer-von Mises indices are available, since they are equivalent to the Chatterjee indices, they are shown here. + +3. Sobol function + +This example was considered in [1] page 18. + +.. [1] Fabrice Gamboa, Pierre Gremaud, Thierry Klein, and AgnĆØs Lagnoux. (2020). Global Sensitivity Analysis: a new generation of mighty estimators based on rank statistics. 
\ No newline at end of file diff --git a/docs/source/conf.py b/docs/source/conf.py index 9c31fa120..7d113e439 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -93,6 +93,7 @@ "../code/sensitivity/morris", "../code/sensitivity/sobol", "../code/sensitivity/cramer_von_mises", + "../code/sensitivity/chatterjee", "../code/stochastic_processes/bispectral", "../code/stochastic_processes/karhunen_loeve", "../code/stochastic_processes/spectral", @@ -129,6 +130,7 @@ "auto_examples/sensitivity/morris", "auto_examples/sensitivity/sobol", "auto_examples/sensitivity/cramer_von_mises", + "auto_examples/sensitivity/chatterjee", "auto_examples/stochastic_processes/bispectral", "auto_examples/stochastic_processes/karhunen_loeve", "auto_examples/stochastic_processes/spectral", diff --git a/docs/source/sensitivity/chatterjee.rst b/docs/source/sensitivity/chatterjee.rst new file mode 100644 index 000000000..88f77889b --- /dev/null +++ b/docs/source/sensitivity/chatterjee.rst @@ -0,0 +1,41 @@ +Chatterjee indices +---------------------------------------- + +The Chatterjee index measures the strength of the relationship between :math:`X` and :math:`Y` using rank statistics. + +Consider :math:`n` samples of random variables :math:`X` and :math:`Y`, with :math:`(X_{(1)}, Y_{(1)}), \ldots,(X_{(n)}, Y_{(n)})` such that :math:`X_{(1)} \leq \cdots \leq X_{(n)}`. Here, random variable :math:`X` can be one of the inputs of a model and :math:`Y` be the model response. If :math:`X_{i}`'s have no ties, there is a unique way of doing this (case of ties is also taken into account in the implementation, see [1]_). Let :math:`r_{i}`` be the rank of :math:`Y_{(i)}`, that is, the number of :math:`j` such that :math:`Y_{(j)} \leq Y_{(i)}`.The Chatterjee index :math:`\xi_{n}(X, Y)` is defined as: + +.. math:: + + \xi_{n}(X, Y):=1-\frac{3 \sum_{i=1}^{n-1}\left|r_{i+1}-r_{i}\right|}{n^{2}-1} + +The Chatterjee index converges for :math:`n \rightarrow \infty` to the CramĆ©r-von Mises index and is faster to estimate than using the Pick and Freeze approach in the Cramer-von Mises index. + +.. [1] Sourav Chatterjee (2021) A New Coefficient of Correlation, Journal of the American Statistical Association, 116:536, 2009-2022, DOI: 10.1080/01621459.2020.1758115 (`Link `_) + +Chatterjee Class +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The :class:`Chatterjee` class is imported using the following command: + +>>> from UQpy.sensitivity.chatterjee import Chatterjee + +Methods +""""""" +.. autoclass:: UQpy.sensitivity.Chatterjee + :members: run, compute_chatterjee_indices, rank_analog_to_pickfreeze, compute_Sobol_indices + +Attributes +"""""""""" +.. autoattribute:: UQpy.sensitivity.Chatterjee.chatterjee_i +.. autoattribute:: UQpy.sensitivity.Chatterjee.sobol_i +.. autoattribute:: UQpy.sensitivity.Chatterjee.CI_chatterjee_i +.. autoattribute:: UQpy.sensitivity.Chatterjee.num_vars +.. autoattribute:: UQpy.sensitivity.Chatterjee.n_samples + +Examples +"""""""""" + +.. toctree:: + + Chatterjee Examples <../auto_examples/sensitivity/chatterjee/index> diff --git a/docs/source/sensitivity/index.rst b/docs/source/sensitivity/index.rst index 4bb68e1a9..e663eed66 100644 --- a/docs/source/sensitivity/index.rst +++ b/docs/source/sensitivity/index.rst @@ -5,6 +5,7 @@ This module contains functionality for all the sampling methods supported in :py The module currently contains the following classes: +- :py:class:`.CramervonMises`: Class to compute Chatterjee sensitivity indices. 
- :py:class:`.CramervonMises`: Class to compute CramĆ©r-von Mises sensitivity indices. - :py:class:`.MorrisSensitivity`: Class to perform Morris. - :py:class:`.PceSensitivity`: Class to compute the sensitivity indices using the :class:`.PolynomialChaosExpansion` method. @@ -18,6 +19,7 @@ Sensitivity analysis comprises techniques focused on determining how the variati :hidden: :caption: Sensitivity + Chatterjee CramĆ©r-von Mises Sensitivity Morris Sensitivity Polynomial Chaos Sensitivity From e4407c667e188acbaa0fe1af97727f3df95f1f18 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Sun, 8 May 2022 23:50:15 +0200 Subject: [PATCH 21/88] Added examples Chatterjee sensitivity --- .../chatterjee/local_exponential.py | 20 ++++++ .../sensitivity/chatterjee/local_ishigami.py | 23 ++++++ .../chatterjee/local_sobol_func.py | 42 +++++++++++ .../chatterjee/plot_chatterjee_exponential.py | 54 ++++++++++++++ .../chatterjee/plot_chatterjee_ishigami.py | 58 +++++++++++++++ .../chatterjee/plot_chatterjee_sobol_func.py | 70 +++++++++++++++++++ 6 files changed, 267 insertions(+) create mode 100644 docs/code/sensitivity/chatterjee/local_exponential.py create mode 100644 docs/code/sensitivity/chatterjee/local_ishigami.py create mode 100644 docs/code/sensitivity/chatterjee/local_sobol_func.py create mode 100644 docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py create mode 100644 docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py create mode 100644 docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py diff --git a/docs/code/sensitivity/chatterjee/local_exponential.py b/docs/code/sensitivity/chatterjee/local_exponential.py new file mode 100644 index 000000000..1fd0ef0d9 --- /dev/null +++ b/docs/code/sensitivity/chatterjee/local_exponential.py @@ -0,0 +1,20 @@ +""" + +Auxiliary file +============================================== + +""" + +import numpy as np + + +def evaluate(X: np.array) -> np.array: + r"""A non-linear function that is used to demonstrate sensitivity index. + + .. 
math:: + f(x) = \exp(x_1 + 2*x_2) + """ + + Y = np.exp(X[:, 0] + 2 * X[:, 1]) + + return Y diff --git a/docs/code/sensitivity/chatterjee/local_ishigami.py b/docs/code/sensitivity/chatterjee/local_ishigami.py new file mode 100644 index 000000000..e5af649fe --- /dev/null +++ b/docs/code/sensitivity/chatterjee/local_ishigami.py @@ -0,0 +1,23 @@ +""" + +Auxiliary file +============================================== + +""" + +import numpy as np + + +def evaluate(X, params=[7, 0.1]): + """Non-monotonic Ishigami-Homma three parameter test function""" + + a = params[0] + b = params[1] + + Y = ( + np.sin(X[:, 0]) + + a * np.power(np.sin(X[:, 1]), 2) + + b * np.power(X[:, 2], 4) * np.sin(X[:, 0]) + ) + + return Y diff --git a/docs/code/sensitivity/chatterjee/local_sobol_func.py b/docs/code/sensitivity/chatterjee/local_sobol_func.py new file mode 100644 index 000000000..1ccabc6dd --- /dev/null +++ b/docs/code/sensitivity/chatterjee/local_sobol_func.py @@ -0,0 +1,42 @@ +""" + +Auxiliary file +============================================== + +""" + +import numpy as np +import copy + + +def evaluate(X, a_values): + + dims = len(a_values) + g = 1 + + for i in range(dims): + g_i = (np.abs(4 * X[:, i] - 2) + a_values[i]) / (1 + a_values[i]) + g *= g_i + + return g + + +def sensitivities(a_values): + + dims = len(a_values) + + Total_order = np.zeros((dims, 1)) + + V_i = (3 * (1 + a_values) ** 2) ** (-1) + + total_variance = np.prod(1 + V_i) - 1 + + First_order = V_i / total_variance + + for i in range(dims): + + rem_First_order = copy.deepcopy(V_i) + rem_First_order[i] = 0 + Total_order[i] = V_i[i] * np.prod(rem_First_order + 1) / total_variance + + return First_order.reshape(-1, 1), Total_order diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py new file mode 100644 index 000000000..2922b97af --- /dev/null +++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py @@ -0,0 +1,54 @@ +""" + +Exponential function +============================================== + +.. 
math:: + f(x) := \exp(x_1 + 2x_2), \quad x_1, x_2 \sim \mathcal{N}(0, 1) + +""" + +# %% +from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.PythonModel import PythonModel +from UQpy.distributions import Normal +from UQpy.distributions.collection.JointIndependent import JointIndependent +from UQpy.sensitivity.chatterjee import Chatterjee + +# %% +# Create Model object +model = PythonModel( + model_script="local_exponential.py", + model_object_name="evaluate", + var_names=[ + "X_1", + "X_2", + ], + delete_files=True, +) + +runmodel_obj = RunModel(model=model) + +# Define distribution object +dist_object = JointIndependent([Normal(0, 1)] * 2) + +# %% [markdown] +# Compute Chatterjee indices + +# %% +SA = Chatterjee(runmodel_obj, dist_object) + +# Compute Sobol indices using the pick and freeze algorithm +computed_indices = SA.run(n_samples=1_000_000) + +# %% [markdown] +# Cramer-von Mises sensitivity analysis +# +# Expected value of the sensitivity indices: +# +# $S^1_{CVM} = \frac{6}{\pi} \operatorname{arctan}(2) - 2 \approx 0.1145$ +# +# $S^2_{CVM} = \frac{6}{\pi} \operatorname{arctan}(\sqrt{19}) - 2 \approx 0.5693$ + +# %% +computed_indices["chatterjee_i"] diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py new file mode 100644 index 000000000..66897d670 --- /dev/null +++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py @@ -0,0 +1,58 @@ +r""" + +Ishigami function +============================================== + +.. math:: + f(x_1, x_2, x_3) = sin(x_1) + a \cdot sin^2(x_2) + b \cdot x_3^4 sin(x_1) + +.. math:: + x_1, x_2, x_3 \sim \mathcal{U}(-\pi, \pi), \quad a, b\in \mathbb{R} + +""" + +# %% +import numpy as np + +from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.PythonModel import PythonModel +from UQpy.distributions import Uniform +from UQpy.distributions.collection.JointIndependent import JointIndependent +from UQpy.sensitivity.chatterjee import Chatterjee + +# %% +# Create Model object +model = PythonModel( + model_script="local_ishigami.py", + model_object_name="evaluate", + var_names=[r"$X_1$", "$X_2$", "$X_3$"], + delete_files=True, + params=[7, 0.1], +) + +runmodel_obj = RunModel(model=model) + +# Define distribution object +dist_object = JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3) + +# %% [markdown] +# Compute Chatterjee indices + +# %% +SA = Chatterjee(runmodel_obj, dist_object) + +computed_indices = SA.run( + n_samples=100_000, + estimate_sobol_indices=True, + num_bootstrap_samples=100, + confidence_level=0.95, +) + +# %% +computed_indices["chatterjee_i"] + +# %% +computed_indices["CI_chatterjee_i"] + +# %% +computed_indices["sobol_i"] diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py new file mode 100644 index 000000000..578131426 --- /dev/null +++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py @@ -0,0 +1,70 @@ +r""" + +Sobol function +============================================== + +.. math:: + + g(x_1, x_2, \ldots, x_D) := \prod_{i=1}^{D} \frac{|4x_i - 2| + a_i}{1 + a_i}, + +where, + +.. math:: + x_i \sim \mathcal{U}(0, 1), \quad a_i \in \mathbb{R}. 
+ +""" + +# %% +import numpy as np + +from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.PythonModel import PythonModel +from UQpy.distributions import Uniform +from UQpy.distributions.collection.JointIndependent import JointIndependent +from UQpy.sensitivity.chatterjee import Chatterjee + +# %% +# Create Model object +num_vars = 6 +a_vals = np.array([0.0, 0.5, 3.0, 9.0, 99.0, 99.0]) + +model = PythonModel( + model_script="local_sobol_func.py", + model_object_name="evaluate", + var_names=["X_" + str(i) for i in range(num_vars)], + delete_files=True, + a_values=a_vals, +) + +runmodel_obj = RunModel(model=model) + +# Define distribution object +dist_object = JointIndependent([Uniform(0, 1)] * num_vars) + +# %% [markdown] +# Compute Chatterjee indices + +# %% +SA = Chatterjee(runmodel_obj, dist_object) + +# Compute Sobol indices using the pick and freeze algorithm +computed_indices = SA.run(n_samples=500_000, estimate_sobol_indices=True) + +# %% +computed_indices["chatterjee_i"] + +# %% [markdown] +# $S_1$ = 5.86781190e-01 +# +# $S_2$ = 2.60791640e-01 +# +# $S_3$ = 3.66738244e-02 +# +# $S_4$ = 5.86781190e-03 +# +# $S_5$ = 5.86781190e-05 +# +# $S_6$ = 5.86781190e-05 + +# %% +computed_indices["sobol_i"] From e181e738dfe4c441cb917a67e2b6c2736e7b2f0f Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Sun, 8 May 2022 23:50:29 +0200 Subject: [PATCH 22/88] Add unit tests for Chatterjee sensitivity --- .../unit_tests/sensitivity/test_chatterjee.py | 230 ++++++++++++++++++ 1 file changed, 230 insertions(+) create mode 100644 tests/unit_tests/sensitivity/test_chatterjee.py diff --git a/tests/unit_tests/sensitivity/test_chatterjee.py b/tests/unit_tests/sensitivity/test_chatterjee.py new file mode 100644 index 000000000..8a7c6495f --- /dev/null +++ b/tests/unit_tests/sensitivity/test_chatterjee.py @@ -0,0 +1,230 @@ +""" +This is the test module for the Chatterjee sensitivity indices. + +Here, we will use the exponential function to test the output, as in +the test module for Cramer sensitivity indices for the Chatterjee indices and +the ishigami function as in the test module for Sobol sensitivity indices for the +Sobol indices. + +The following methods are tested: +1. pick_and_freeze_estimator +2. Sobol estimate + +Important +---------- +The computed indices are computed using the `np.isclose` function. + +Function signature: + numpy.isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False) + + Parameters: + a, b: array_like + Input arrays to compare. + + rtol: float + The relative tolerance parameter. + + atol: float + The absolute tolerance parameter. + +Each element of the `diff` array is compared as follows: +diff = |a - b| +diff <= atol + rtol * abs(b) + +- relative tolerance: rtol * abs(b) + It is the maximum allowed difference between a and b, + relative to the absolute value of b. + For example, to set a tolerance of 1%, pass rol=0.01, + which assures that the values are within 2 decimal places of each other. + +- absolute tolerance: atol + When b is close to zero, the atol value is used. 
+ +""" + +import numpy as np +import pytest + +from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.PythonModel import PythonModel +from UQpy.distributions import Uniform, Normal +from UQpy.distributions.collection.JointIndependent import JointIndependent +from UQpy.sensitivity.chatterjee import Chatterjee + + +# Prepare +############################################################################### + +# Prepare the input distribution +@pytest.fixture() +def exponential_input_dist_object(): + """ + This function returns the input distribution for the Ishigami function. + + X1 ~ Normal(0,1) + X2 ~ Normal(0,1) + + """ + return JointIndependent([Normal(0, 1)] * 2) + + +@pytest.fixture() +def exponential_model_object(): + """This function creates the exponential run_model_object""" + model = PythonModel( + model_script="exponential.py", + model_object_name="evaluate", + var_names=[ + "X_1", + "X_2", + ], + delete_files=True, + ) + + runmodel_obj = RunModel(model=model) + + return runmodel_obj + + +@pytest.fixture() +def Chatterjee_object(exponential_model_object, exponential_input_dist_object): + """This function creates the Chatterjee object""" + return Chatterjee(exponential_model_object, exponential_input_dist_object) + + +@pytest.fixture() +def analytical_Chatterjee_indices(): + """This function returns the analytical Chatterjee indices. + + S1 = (6/np.pi) * np.arctan(2) - 2 + S2 = (6/np.pi) * np.arctan(np.sqrt(19)) - 2 + + print(np.around(S1, 4)) + print(np.around(S2, 4)) + + """ + + return np.array([[0.1145], [0.5693]]) + + +@pytest.fixture() +def numerical_Chatterjee_indices(Chatterjee_object): + """This function returns the numerical Chatterjee indices.""" + + SA = Chatterjee_object + + np.random.seed(12345) #! set seed for reproducibility + + computed_indices = SA.run(n_samples=10_000) + + return computed_indices["chatterjee_i"] + + +@pytest.fixture() +def ishigami_input_dist_object(): + """ + This function returns the input distribution for the Ishigami function. + + X1 ~ Uniform(-pi, pi) + X2 ~ Uniform(-pi, pi) + X3 ~ Uniform(-pi, pi) + + """ + return JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3) + + +@pytest.fixture() +def ishigami_model_object(): + """This function creates the Ishigami run_model_object""" + model = PythonModel( + model_script="ishigami.py", + model_object_name="evaluate", + var_names=[r"$X_1$", "$X_2$", "$X_3$"], + delete_files=True, + params=[7, 0.1], + ) + + runmodel_obj = RunModel(model=model) + + return runmodel_obj + + +@pytest.fixture() +def Chatterjee_object_ishigami(ishigami_model_object, ishigami_input_dist_object): + """This function creates the Chatterjee object""" + return Chatterjee(ishigami_model_object, ishigami_input_dist_object) + + +@pytest.fixture() +def numerical_Sobol_indices(Chatterjee_object_ishigami): + """This function returns the Sobol indices.""" + + SA = Chatterjee_object_ishigami + + np.random.seed(12345) + + computed_indices = SA.run(n_samples=10_000, estimate_sobol_indices=True) + + return computed_indices["sobol_i"] + + +@pytest.fixture() +def analytical_ishigami_Sobol_indices(): + """ + Analytical Sobol indices for the Ishigami function. 
+ + Copy-paste the following to reproduce the given indices: + + a = 7 + b = 0.1 + + V1 = 0.5*(1 + (b*np.pi**4)/5)**2 + V2 = (a**2)/8 + V3 = 0 + + VT3 = (8*(b**2)*np.pi**8)/225 + VT1 = V1 + VT3 + VT2 = V2 + + total_variance = V2 + (b*np.pi**4)/5 + ((b**2) * np.pi**8)/18 + 0.5 + + S = np.array([V1, V2, V3])/total_variance + S_T = np.array([VT1, VT2, VT3])/total_variance + + S = np.around(S, 4) + S_T = np.around(S_T, 4) + + """ + + S1 = 0.3139 + S2 = 0.4424 + S3 = 0 + + S_T1 = 0.5576 + S_T2 = 0.4424 + S_T3 = 0.2437 + + S = np.array([S1, S2, S3]) + S_T = np.array([S_T1, S_T2, S_T3]) + + return S.reshape(-1, 1) + + +# Unit tests +############################################################################### + + +def test_Chatterjee_estimate( + numerical_Chatterjee_indices, analytical_Chatterjee_indices +): + """This function tests the Chatterjee estimate.""" + assert np.isclose( + numerical_Chatterjee_indices, analytical_Chatterjee_indices, rtol=0, atol=1e-2 + ).all() + + +def test_Sobol_estimate(numerical_Sobol_indices, analytical_ishigami_Sobol_indices): + """This function tests the Sobol estimate.""" + assert np.isclose( + numerical_Sobol_indices, analytical_ishigami_Sobol_indices, rtol=0, atol=1e-2 + ).all() From 1e6118f00155192508aedf0e12cf0df126cff5d4 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 9 May 2022 03:23:33 +0200 Subject: [PATCH 23/88] Added generalised sobol sensitivity --- src/UQpy/sensitivity/__init__.py | 2 + src/UQpy/sensitivity/generalised_sobol.py | 378 ++++++++++++++++++++++ 2 files changed, 380 insertions(+) create mode 100644 src/UQpy/sensitivity/generalised_sobol.py diff --git a/src/UQpy/sensitivity/__init__.py b/src/UQpy/sensitivity/__init__.py index 73c4166da..e25335a34 100644 --- a/src/UQpy/sensitivity/__init__.py +++ b/src/UQpy/sensitivity/__init__.py @@ -3,9 +3,11 @@ from UQpy.sensitivity.sobol import Sobol from UQpy.sensitivity.cramer_von_mises import CramervonMises from UQpy.sensitivity.chatterjee import Chatterjee +from UQpy.sensitivity.generalised_sobol import GeneralisedSobol from . import MorrisSensitivity from . import PceSensitivity from . import Sobol from . import CramervonMises from . import Chatterjee +from . import GeneralisedSobol diff --git a/src/UQpy/sensitivity/generalised_sobol.py b/src/UQpy/sensitivity/generalised_sobol.py new file mode 100644 index 000000000..e5cf2f654 --- /dev/null +++ b/src/UQpy/sensitivity/generalised_sobol.py @@ -0,0 +1,378 @@ +""" + +The GeneralisedSobol class computes the generalised Sobol indices for a given +multi-ouput model. The class is based on the work of [1]_ and [2]_. + +Additionally, we can compute the confidence intervals for the Sobol indices +using bootstrapping [3]_. + +References +---------- + + .. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others. + Sensitivity analysis for multidimensional and functional outputs. + Electronic journal of statistics 2014; 8(1): 575-603. + + .. [2] Alexanderian A, Gremaud PA, Smith RC. Variance-based sensitivity + analysis for time-dependent processes. Reliability engineering + & system safety 2020; 196: 106722. + +.. [3] Jeremy Orloff and Jonathan Bloom (2014), Bootstrap confidence intervals, + Introduction to Probability and Statistics, MIT OCW. 
+ +""" + +import logging + +import numpy as np + +from UQpy.sensitivity.baseclass.sensitivity import Sensitivity +from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples +from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter + + +class GeneralisedSobol(Sensitivity): + """ + Compute the generalised Sobol indices for models with multiple outputs + (vector-valued response) using the Pick-and-Freeze method. + + :param runmodel_object: The computational model. It should be of type :class:`.RunModel`. \ + The output QoI can be a scalar or vector of length :code:`ny`, then the sensitivity \ + indices of all :code:`ny` outputs are computed independently. + + :param distributions: List of :class:`.Distribution` objects corresponding to each \ + random variable, or :class:`.JointIndependent` object \ + (multivariate RV with independent marginals). + + :param random_state: Random seed used to initialize the pseudo-random number \ + generator. Default is :any:`None`. + + **Methods:** + """ + + def __init__( + self, runmodel_object, dist_object, random_state=None, **kwargs + ) -> None: + + super().__init__(runmodel_object, dist_object, random_state, **kwargs) + + # Create logger with the same name as the class + self.logger = logging.getLogger(__name__) + self.logger.setLevel(logging.ERROR) + frmt = UQpyLoggingFormatter() + + # create console handler with a higher log level + ch = logging.StreamHandler() + ch.setFormatter(frmt) + + # add the handler to the logger + self.logger.addHandler(ch) + + self.gen_sobol_i = None + "Generalised first order Sobol indices, :class:`ndarray` of shape (num_vars, 1)" + + self.gen_sobol_total_i = None + "Generalised total order Sobol indices, :class:`ndarray` of shape (num_vars, 1)" + + self.n_samples = None + "Number of samples used to compute the sensitivity indices, :class:`int`" + + self.num_vars = None + "Number of model input variables, :class:`int`" + + def run( + self, + n_samples=1_000, + num_bootstrap_samples=None, + confidence_level=0.95, + ): + + """ + Compute the generalised Sobol indices for models with multiple outputs + (vector-valued response) using the Pick-and-Freeze method. + + :param n_samples: Number of samples used to compute the sensitivity indices. \ + Default is 1,000. + + :param num_bootstrap_samples: Number of bootstrap samples used to compute the \ + confidence intervals. Default is :any:`None`. + + :param confidence_level: Confidence level used to compute the confidence \ + intervals. Default is 0.95. + + :return: A :class:`dict` with the following keys: \ + :code:`gen_sobol_i` of shape :code:`(num_vars, 1)`, \ + :code:`gen_sobol_total_i` of shape :code:`(num_vars, 1)`, \ + :code:`CI_gen_sobol_i` of shape :code:`(num_vars, 2)`, \ + :code:`CI_gen_sobol_total_i` of shape :code:`(num_vars, 2)`. 
+ + """ + + # Check n_samples data type + self.n_samples = n_samples + if not isinstance(self.n_samples, int): + raise TypeError("UQpy: n_samples should be an integer") + + # Check num_bootstrap_samples data type + if num_bootstrap_samples is not None: + if not isinstance(num_bootstrap_samples, int): + raise TypeError("UQpy: num_bootstrap_samples should be an integer.\n") + elif num_bootstrap_samples is None: + self.logger.info( + "UQpy: num_bootstrap_samples is set to None, confidence intervals will not be computed.\n" + ) + + ################## GENERATE SAMPLES ################## + + (A_samples, B_samples, C_i_generator, _,) = generate_pick_freeze_samples( + self.dist_object, self.n_samples, self.random_state + ) + + self.logger.info("UQpy: Generated samples using the pick-freeze scheme.\n") + + self.num_vars = A_samples.shape[1] # Number of variables + + ################# MODEL EVALUATIONS #################### + + A_model_evals = self._run_model(A_samples) # shape: (n_samples, n_outputs) + + # if model output is vectorised, + # shape retured by model is (n_samples, n_outputs, 1) + # we need to reshape it to (n_samples, n_outputs) + if A_model_evals.ndim == 3: + A_model_evals = A_model_evals[:, :, 0] # shape: (n_samples, n_outputs) + + self.logger.info("UQpy: Model evaluations A completed.\n") + + B_model_evals = self._run_model(B_samples) # shape: (n_samples, n_outputs) + + # if model output is vectorised, + # shape retured by model is (n_samples, n_outputs, 1) + # we need to reshape it to (n_samples, n_outputs) + if B_model_evals.ndim == 3: + B_model_evals = B_model_evals[:, :, 0] # shape: (n_samples, n_outputs) + + self.logger.info("UQpy: Model evaluations B completed.\n") + + self.n_outputs = A_model_evals.shape[1] + + # shape: (n_outputs, n_samples, num_vars) + C_i_model_evals = np.zeros((self.n_outputs, self.n_samples, self.num_vars)) + + for i, C_i in enumerate(C_i_generator): + + # if model output is vectorised, + # shape retured by model is (n_samples, n_outputs, 1) + # we need to reshape it to (n_samples, n_outputs) + model_evals = self._run_model(C_i) + + if model_evals.ndim == 3: + C_i_model_evals[:, :, i] = self._run_model(C_i)[:, :, 0].T + else: + C_i_model_evals[:, :, i] = model_evals.T + + self.logger.info("UQpy: Model evaluations C completed.\n") + + self.logger.info("UQpy: All model evaluations computed successfully.\n") + + ######################### STORAGE ######################## + + # Create dictionary to store the sensitivity indices + computed_indices = {} + + ################## COMPUTE GENERALISED SOBOL INDICES ################## + + self.gen_sobol_i = self.compute_first_order_generalised_sobol_indices( + A_model_evals, B_model_evals, C_i_model_evals + ) + + self.logger.info( + "UQpy: First order Generalised Sobol indices computed successfully.\n" + ) + + self.gen_sobol_total_i = self.compute_total_order_generalised_sobol_indices( + A_model_evals, B_model_evals, C_i_model_evals + ) + + self.logger.info( + "UQpy: Total order Generalised Sobol indices computed successfully.\n" + ) + + # Store the indices in the dictionary + computed_indices["gen_sobol_i"] = self.gen_sobol_i + computed_indices["gen_sobol_total_i"] = self.gen_sobol_total_i + + ################## CONFIDENCE INTERVALS #################### + + if num_bootstrap_samples is not None: + + self.logger.info("UQpy: Computing confidence intervals ...\n") + + estimator_inputs = [ + A_model_evals, + B_model_evals, + C_i_model_evals, + ] + + # First order generalised Sobol indices + self.CI_gen_sobol_i = 
self.bootstrapping( + self.compute_first_order_generalised_sobol_indices, + estimator_inputs, + computed_indices["gen_sobol_i"], + num_bootstrap_samples, + confidence_level, + ) + + self.logger.info( + "UQpy: Confidence intervals for First order Generalised Sobol indices computed successfully.\n" + ) + + # Total order generalised Sobol indices + self.CI_gen_sobol_total_i = self.bootstrapping( + self.compute_total_order_generalised_sobol_indices, + estimator_inputs, + computed_indices["gen_sobol_total_i"], + num_bootstrap_samples, + confidence_level, + ) + + self.logger.info( + "UQpy: Confidence intervals for Total order Sobol Generalised indices computed successfully.\n" + ) + + # Store the indices in the dictionary + computed_indices["CI_gen_sobol_i"] = self.CI_gen_sobol_i + computed_indices["CI_gen_sobol_total_i"] = self.CI_gen_sobol_total_i + + return computed_indices + + @staticmethod + def compute_first_order_generalised_sobol_indices( + A_model_evals, B_model_evals, C_i_model_evals + ): + + """ + Compute the generalised Sobol indices for models with multiple outputs. + + :param A_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_samples, n_outputs)`. + :param B_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_samples, n_outputs)`. + :param C_i_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_outputs, n_samples, num_vars)`. + + :return: First order generalised Sobol indices, :class:`numpy.ndarray` of shape :code:`(n_outputs, num_vars)`. + + """ + + num_vars = C_i_model_evals.shape[2] + n_outputs = A_model_evals.shape[1] + + # store generalised Sobol indices + gen_sobol_i = np.zeros((num_vars, 1)) + + for i in range(num_vars): + + all_Y_i = A_model_evals.T # shape: (n_outputs, n_samples) + all_Y_i_tilde = B_model_evals.T # shape: (n_outputs, n_samples) + all_Y_i_u = C_i_model_evals[:, :, i] # shape: (n_outputs, n_samples) + + # compute the mean using all model evaluations + # shape: (n_outputs, 1) + mean = ( + np.mean(all_Y_i, axis=1, keepdims=1) + + np.mean(all_Y_i_u, axis=1, keepdims=1) + + np.mean(all_Y_i_tilde, axis=1, keepdims=1) + ) / 3 + + # center the evaluations since mean is available + all_Y_i = all_Y_i - mean + all_Y_i_tilde = all_Y_i_tilde - mean + all_Y_i_u = all_Y_i_u - mean + + # compute the variance matrix using all available model evaluations + # shape: (n_outputs, n_outputs) + C = (np.cov(all_Y_i) + np.cov(all_Y_i_u) + np.cov(all_Y_i_tilde)) / 3 + + # compute covariance btw. RVs 'X' and 'Y' + # shape: (2*n_outputs, 2*n_outputs) + # It contains the following 4 block matrices: + # (1, 1) variance of 'X' + # *(1, 2) covariance between 'X' and 'Y' (a.k.a. cross-covariance) + # (2, 1) covariance between 'Y' and 'X' (a.k.a. cross-covariance) + # (2, 2) variance of 'Y' + _cov_1 = np.cov(all_Y_i_u, all_Y_i) # for first order indices + + # We need the cross-covariance between 'X' and 'Y' + # Extract *(1, 2) (upper right block) + # shape: (n_outputs, n_outputs) + C_u = _cov_1[0:n_outputs, n_outputs : 2 * n_outputs] + + denominator = np.trace(C) + + # Generalised Sobol indices + gen_sobol_i[i] = np.trace(C_u) / denominator + + return gen_sobol_i + + @staticmethod + def compute_total_order_generalised_sobol_indices( + A_model_evals, B_model_evals, C_i_model_evals + ): + + """ + Compute the generalised Sobol indices for models with multiple outputs. + + :param A_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_samples, n_outputs)`. 
+ :param B_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_samples, n_outputs)`. + :param C_i_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_outputs, n_samples, num_vars)`. + + :return: Total order generalised Sobol indices, :class:`numpy.ndarray` of shape :code:`(n_outputs, num_vars)`. + + """ + + num_vars = C_i_model_evals.shape[2] + n_outputs = A_model_evals.shape[1] + + # store generalised Sobol indices + gen_sobol_total_i = np.zeros((num_vars, 1)) + + for i in range(num_vars): + + all_Y_i = A_model_evals.T # shape: (n_outputs, n_samples) + all_Y_i_tilde = B_model_evals.T # shape: (n_outputs, n_samples) + all_Y_i_u = C_i_model_evals[:, :, i] # shape: (n_outputs, n_samples) + + # compute the mean using all model evaluations + # shape: (n_outputs, 1) + mean = ( + np.mean(all_Y_i, axis=1, keepdims=1) + + np.mean(all_Y_i_u, axis=1, keepdims=1) + + np.mean(all_Y_i_tilde, axis=1, keepdims=1) + ) / 3 + + # center the evaluations since mean is available + all_Y_i = all_Y_i - mean + all_Y_i_tilde = all_Y_i_tilde - mean + all_Y_i_u = all_Y_i_u - mean + + # compute the variance matrix using all available model evaluations + # shape: (n_outputs, n_outputs) + C = (np.cov(all_Y_i) + np.cov(all_Y_i_u) + np.cov(all_Y_i_tilde)) / 3 + + # compute covariance btw. RVs 'X' and 'Y' + # shape: (2*n_outputs, 2*n_outputs) + # It contains the following 4 block matrices: + # (1, 1) variance of 'X' + # *(1, 2) covariance between 'X' and 'Y' (a.k.a. cross-covariance) + # (2, 1) covariance between 'Y' and 'X' (a.k.a. cross-covariance) + # (2, 2) variance of 'Y' + _cov_2 = np.cov(all_Y_i_u, all_Y_i_tilde) # for total order indices + + # We need the cross-covariance between 'X' and 'Y' + # Extract *(1, 2) (upper right block) + # shape: (n_outputs, n_outputs) + C_u_tilde = _cov_2[0:n_outputs, n_outputs : 2 * n_outputs] + denominator = np.trace(C) + + # Generalised Sobol indices + gen_sobol_total_i[i] = 1 - np.trace(C_u_tilde) / denominator + + return gen_sobol_total_i From 3da45cf5af9360c8dfb7756500000269d0cdd1f2 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 9 May 2022 03:24:18 +0200 Subject: [PATCH 24/88] Added documentation for generalised sobol indices --- .../sensitivity/generalised_sobol/README.rst | 12 ++++ docs/source/conf.py | 2 + docs/source/sensitivity/generalised_sobol.rst | 67 +++++++++++++++++++ docs/source/sensitivity/index.rst | 4 +- docs/source/sensitivity/sobol.rst | 12 ++++ 5 files changed, 96 insertions(+), 1 deletion(-) create mode 100644 docs/code/sensitivity/generalised_sobol/README.rst create mode 100644 docs/source/sensitivity/generalised_sobol.rst diff --git a/docs/code/sensitivity/generalised_sobol/README.rst b/docs/code/sensitivity/generalised_sobol/README.rst new file mode 100644 index 000000000..88a5bec5e --- /dev/null +++ b/docs/code/sensitivity/generalised_sobol/README.rst @@ -0,0 +1,12 @@ +Generalised Sobol Sensitivity indices +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +We demonstrate the computation of GSI for 2 examples with multiple outputs: + +1. Mechanical oscillator (analytical solution): Example from [1] page 2 +2. Mechanical oscillator ODE (numerical solution): Example from [2] page 19 +3. Toy example (analytical solution): Example from [2] + +.. [1] Alexanderian, Alen, Gremaud, Pierre A and Smith, Ralph C. Variance-based sensitivity analysis for time-dependent processes. + +.. [2] Gamboa F, Janon A, Klein T, Lagnoux A, others. Sensitivity analysis for multidimensional and functional outputs. 
Electronic journal of statistics 2014; 8(1): 575-603. \ No newline at end of file diff --git a/docs/source/conf.py b/docs/source/conf.py index 7d113e439..68538001d 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -94,6 +94,7 @@ "../code/sensitivity/sobol", "../code/sensitivity/cramer_von_mises", "../code/sensitivity/chatterjee", + "../code/sensitivity/generalised_sobol", "../code/stochastic_processes/bispectral", "../code/stochastic_processes/karhunen_loeve", "../code/stochastic_processes/spectral", @@ -131,6 +132,7 @@ "auto_examples/sensitivity/sobol", "auto_examples/sensitivity/cramer_von_mises", "auto_examples/sensitivity/chatterjee", + "auto_examples/sensitivity/generalised_sobol", "auto_examples/stochastic_processes/bispectral", "auto_examples/stochastic_processes/karhunen_loeve", "auto_examples/stochastic_processes/spectral", diff --git a/docs/source/sensitivity/generalised_sobol.rst b/docs/source/sensitivity/generalised_sobol.rst new file mode 100644 index 000000000..3515a744b --- /dev/null +++ b/docs/source/sensitivity/generalised_sobol.rst @@ -0,0 +1,67 @@ +Generalised Sobol indices +---------------------------------------- + +A natural generalization of the Sobol indices (that are classically defined for single-output models) for multi-output models. The generalised Sobol indices are computed using the Pick-and-Freeze approach. (For implementation details, see also [1]_.) + +Consider a model :math:`Y=f(X): \mathbb{R}^d \rightarrow \mathbb{R}^k` with :math:`d` inputs :math:`X=\left[ X_{1}, X_{2},ā€¦,X_{d} \right]` and :math:`k` outputs :math:`Y=\left[ Y_{1}, Y_{2},ā€¦,Y_{k} \right]`. + +As the inputs :math:`X_{1}, \ldots, X_{d}` are independent, :math:`f` may be decomposed through the so-called Hoeffding decomposition: + +.. math:: + f(X) = c + f_{\mathbf{u}}\left(X_{\mathbf{u}}\right)+f_{\sim \mathbf{u}}\left(X_{\sim \mathbf{u}}\right) + f_{\mathbf{u}, \sim \mathbf{u}}\left(X_{\mathbf{u}}, X_{\sim \mathbf{u}}\right) + +where :math:`c \in \mathbb{R}^{k}, f_{\mathbf{u}}: E_{\mathbf{u}} \rightarrow \mathbb{R}^{k}, f_{\sim \mathbf{u}}: E_{\sim \mathbf{u}} \rightarrow \mathbb{R}^{k}` and :math:`f_{\mathbf{u}, \sim \mathbf{u}}: E \rightarrow \mathbb{R}^{k}` are given by +:math:`c=\mathbb{E}(Y), f_{\mathbf{u}}=\mathbb{E}\left(Y \mid X_{\mathbf{u}}\right)-c, f_{\sim \mathbf{u}}=\mathbb{E}\left(Y \mid X_{\sim \mathbf{u}}\right)-c, f_{u, \sim \mathbf{u}}=Y-f_{\mathbf{u}}-f_{\sim \mathbf{u}}-c` + +Thanks to :math:`L^{2}`-orthogonality, computing the covariance matrix of both sides of the above equation leads to + +.. math:: + \Sigma = C_{\mathbf{u}}+C_{\sim \mathbf{u}}+C_{\mathbf{u}, \sim \mathbf{u}}. + +Here, :math:`\Sigma, C_{\mathbf{u}}, C_{\sim \mathbf{u}}` and :math:`C_{\mathbf{u}, \sim \mathbf{u}}` are denoting respectively the covariance matrices of :math:`Y, f_{\mathbf{u}}\left(X_{\mathbf{u}}\right), f_{\sim \mathbf{u}}\left(X_{\sim \mathbf{u}}\right)` and :math:`f_{\mathbf{u}, \sim \mathbf{u}}\left(X_{\mathbf{u}}, X_{\sim \mathbf{u}}\right)`. + +The First order generalised Sobol indices can be computed using the Pick-and-Freeze approach as follows, where :math:`\mathbf{u}` is a variable :math:`i` of the independent random variables. + +.. math:: + S_{i, N}=\frac{\operatorname{Tr}\left(C_{i, N}\right)}{\operatorname{Tr}\left(\Sigma_{N}\right)} + +where :math:`C_{\mathbf{i}, N}` and :math:`\Sigma_{N}` are the empirical estimators of :math:`C_{\mathbf{i}}=\operatorname{Cov}\left(Y, Y^{\mathbf{i}}\right)` and :math:`\Sigma=\mathbb{V}[Y]` defined by + +.. 
math:: + C_{\mathbf{i}, N}=\frac{1}{N} \sum_{j=1}^{N} Y_{j}^{\mathrm{i}} Y_{j}^{t}-\left(\frac{1}{N} \sum_{j=1}^{N} \frac{Y_{j}+Y_{j}^{\mathbf{i}}}{2}\right)\left(\frac{1}{N} \sum_{j=1}^{N} \frac{Y_{j}+Y_{j}^{\mathbf{i}}}{2}\right)^{t} + +and + +.. math:: + \Sigma_{N}=\frac{1}{N} \sum_{j=1}^{N} \frac{Y_{j} Y_{j}^{t}+Y_{j}^{\mathbf{i}}\left(Y_{j}^{\mathbf{i}}\right)^{t}}{2}-\left(\frac{1}{N} \sum_{j=1}^{N} \frac{Y_{j}+Y_{j}^{\mathbf{i}}}{2}\right)\left(\frac{1}{N} \sum_{j=1}^{N} \frac{Y_{j}+Y_{j}^{\mathbf{i}}}{2}\right)^{t} + + +.. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others. Sensitivity analysis for multidimensional and functional outputs. Electronic journal of statistics 2014; 8(1): 575-603.(`Link `_) + + +Generalised Sobol Class +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The :class:`Generalised Sobol` class is imported using the following command: + +>>> from UQpy.sensitivity.generalised_sobol import GeneralisedSobol + +Methods +""""""" + +.. autoclass:: UQpy.sensitivity.GeneralisedSobol + :members: run + +Attributes +"""""""""" +.. autoattribute:: UQpy.sensitivity.GeneralisedSobol.gen_sobol_i +.. autoattribute:: UQpy.sensitivity.GeneralisedSobol.gen_sobol_total_i +.. autoattribute:: UQpy.sensitivity.GeneralisedSobol.n_samples +.. autoattribute:: UQpy.sensitivity.GeneralisedSobol.num_vars + +Examples +"""""""""" + +.. toctree:: + + Generalised Sobol Examples <../auto_examples/sensitivity/generalised_sobol/index> \ No newline at end of file diff --git a/docs/source/sensitivity/index.rst b/docs/source/sensitivity/index.rst index e663eed66..1b2a8367d 100644 --- a/docs/source/sensitivity/index.rst +++ b/docs/source/sensitivity/index.rst @@ -5,8 +5,9 @@ This module contains functionality for all the sampling methods supported in :py The module currently contains the following classes: -- :py:class:`.CramervonMises`: Class to compute Chatterjee sensitivity indices. +- :py:class:`.Chatterjee`: Class to compute Chatterjee sensitivity indices. - :py:class:`.CramervonMises`: Class to compute CramĆ©r-von Mises sensitivity indices. +- :py:class:`.GeneralisedSobol`: Class to compute Generalised Sobol sensitivity indices. - :py:class:`.MorrisSensitivity`: Class to perform Morris. - :py:class:`.PceSensitivity`: Class to compute the sensitivity indices using the :class:`.PolynomialChaosExpansion` method. - :py:class:`.Sobol`: Class to compute Sobol sensitivity indices. @@ -21,6 +22,7 @@ Sensitivity analysis comprises techniques focused on determining how the variati Chatterjee CramĆ©r-von Mises Sensitivity + Generalised Sobol Sensitivity Morris Sensitivity Polynomial Chaos Sensitivity Sobol Sensitivity diff --git a/docs/source/sensitivity/sobol.rst b/docs/source/sensitivity/sobol.rst index 462db15fd..fa4d49265 100644 --- a/docs/source/sensitivity/sobol.rst +++ b/docs/source/sensitivity/sobol.rst @@ -76,6 +76,18 @@ Methods .. autoclass:: UQpy.sensitivity.Sobol :members: run +Attributes +"""""""""" +.. autoattribute:: UQpy.sensitivity.Sobol.sobol_i +.. autoattribute:: UQpy.sensitivity.Sobol.sobol_total_i +.. autoattribute:: UQpy.sensitivity.Sobol.CI_sobol_i +.. autoattribute:: UQpy.sensitivity.Sobol.CI_sobol_total_i +.. autoattribute:: UQpy.sensitivity.Sobol.CI_sobol_ij +.. autoattribute:: UQpy.sensitivity.Sobol.n_samples +.. autoattribute:: UQpy.sensitivity.Sobol.num_vars +.. 
autoattribute:: UQpy.sensitivity.Sobol.multioutput + + Examples """""""""" From 29b47271e500ed76dfbab00156831536957be1d0 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 9 May 2022 03:24:53 +0200 Subject: [PATCH 25/88] Add unit tests for generalised sobol sensitivity --- tests/unit_tests/sensitivity/multioutput.py | 42 +++ .../sensitivity/test_generalised_sobol.py | 315 ++++++++++++++++++ 2 files changed, 357 insertions(+) create mode 100644 tests/unit_tests/sensitivity/multioutput.py create mode 100644 tests/unit_tests/sensitivity/test_generalised_sobol.py diff --git a/tests/unit_tests/sensitivity/multioutput.py b/tests/unit_tests/sensitivity/multioutput.py new file mode 100644 index 000000000..6974d37c0 --- /dev/null +++ b/tests/unit_tests/sensitivity/multioutput.py @@ -0,0 +1,42 @@ +"""" +This is the toy example with multiple outputs from [1]_. + +References +---------- + +.. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others. + Sensitivity analysis for multidimensional and functional outputs. + Electronic journal of statistics 2014; 8(1): 575-603. + +""" + +import numpy as np + + +def evaluate(X): + + """ + + * **Input:** + + * **X** (`ndarray`): + Samples from the input distribution. + Shape: (n_samples, 2) + + * **Output:** + + * **Y** (`ndarray`): + Model evaluations. + Shape: (2, n_samples) + + """ + + n_samples = X.shape[0] + + output = np.zeros((2, n_samples)) + + output[0, :] = X[:, 0] + X[:, 1] + X[:, 0] * X[:, 1] + + output[1, :] = 2 * X[:, 0] + X[:, 1] + 3 * X[:, 0] * X[:, 1] + + return output diff --git a/tests/unit_tests/sensitivity/test_generalised_sobol.py b/tests/unit_tests/sensitivity/test_generalised_sobol.py new file mode 100644 index 000000000..c759d85bb --- /dev/null +++ b/tests/unit_tests/sensitivity/test_generalised_sobol.py @@ -0,0 +1,315 @@ +"""" +This is the test module for the Generalised Sobol indices. + +Here, we will use the toy example from [1]_, which is a multi-output problem. + + +References +---------- + +.. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others. + Sensitivity analysis for multidimensional and functional outputs. + Electronic journal of statistics 2014; 8(1): 575-603. + +Important +---------- +The computed indices are computed using the `np.isclose` function. + +Function signature: + numpy.isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False) + + Parameters: + a, b: array_like + Input arrays to compare. + + rtol: float + The relative tolerance parameter. + + atol: float + The absolute tolerance parameter. + +Each element of the `diff` array is compared as follows: +diff = |a - b| +diff <= atol + rtol * abs(b) + +- relative tolerance: rtol * abs(b) + It is the maximum allowed difference between a and b, + relative to the absolute value of b. + For example, to set a tolerance of 1%, pass rol=0.01, + which assures that the values are within 2 decimal places of each other. +- absolute tolerance: atol + When b is close to zero, the atol value is used. 
+ +""" + +import numpy as np +import pytest +import scipy + +from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.PythonModel import PythonModel +from UQpy.distributions import Uniform, Normal +from UQpy.distributions.collection.JointIndependent import JointIndependent +from UQpy.sensitivity.generalised_sobol import GeneralisedSobol + +# Prepare +############################################################################### + +# Prepare the input distribution +@pytest.fixture() +def normal_input_dist_object(): + """ + This function returns the input distribution for the toy model. + + X1 ~ Normal(0, 1) + X2 ~ Normal(0, 1) + + """ + return JointIndependent([Normal(0, 1)] * 2) + + +@pytest.fixture() +def uniform_input_dist_object(): + """ + This function returns the input distribution for the toy model. + + X1 ~ Uniform(0, 1) + X2 ~ Uniform(0, 1) + + """ + return JointIndependent([Uniform(0, 1)] * 2) + + +@pytest.fixture() +def toy_model_object(): + """ + This function creates the toy model. + + """ + model = PythonModel( + model_script="multioutput.py", + model_object_name="evaluate", + var_names=[ + "X_1", + "X_2", + ], + delete_files=True, + ) + + runmodel_obj = RunModel(model=model) + + return runmodel_obj + + +@pytest.fixture() +def generalised_sobol_object_normal(normal_input_dist_object, toy_model_object): + """ + This function creates the Generalised Sobol indices object + with normal input distribution. + + """ + + return GeneralisedSobol(toy_model_object, normal_input_dist_object) + + +@pytest.fixture() +def generalised_sobol_object_uniform(uniform_input_dist_object, toy_model_object): + """ + This function creates the Generalised Sobol indices object + with uniform input distribution. + + """ + + return GeneralisedSobol(toy_model_object, uniform_input_dist_object) + + +@pytest.fixture() +def analytical_toy_GSI_normal(): + """ + Analytical first order Generalised Sobol indices + for the toy example with normal input distribution. + """ + + return np.array([0.2941, 0.1176]).reshape(-1, 1) + + +@pytest.fixture() +def analytical_toy_GSI_uniform(): + """ " + Analytical first order Generalised Sobol indices + for toy example with uniform input distribution. + """ + + return np.array([0.6084, 0.3566]).reshape(-1, 1) + + +@pytest.fixture() +def pick_and_freeze_toy_GSI_normal(generalised_sobol_object_normal): + """ " + Generalised first order Sobol indices computed using the Pick and Freeze + approach for the toy example with normal input distribution. + """ + + SA = generalised_sobol_object_normal + + np.random.seed(12345) #! set seed for reproducibility + + computed_indices = SA.run(n_samples=100_000) + + return computed_indices["gen_sobol_i"] + + +@pytest.fixture() +def pick_and_freeze_toy_GSI_uniform(generalised_sobol_object_uniform): + """ " + Generalised first order Sobol indices computed using the Pick and Freeze + approach for the toy example with uniform input distribution. + """ + + SA = generalised_sobol_object_uniform + + np.random.seed(12345) #! 
set seed for reproducibility
+
+    computed_indices = SA.run(n_samples=100_000)
+
+    return computed_indices["gen_sobol_i"]
+
+
+@pytest.fixture()
+def NUM_SAMPLES():
+    """This function returns the number of samples for bootstrapping"""
+
+    num_bootstrap_samples = 500
+    num_samples = 20_000
+
+    return num_bootstrap_samples, num_samples
+
+
+@pytest.fixture()
+def bootstrap_generalised_sobol_index_variance(
+    generalised_sobol_object_normal, NUM_SAMPLES
+):
+
+    SA = generalised_sobol_object_normal
+
+    np.random.seed(12345)  #! set seed for reproducibility
+
+    num_bootstrap_samples, n_samples = NUM_SAMPLES
+
+    confidence_level = 0.95
+    delta = -scipy.stats.norm.ppf((1 - confidence_level) / 2)
+
+    # Compute the confidence intervals
+    computed_indices = SA.run(
+        n_samples=n_samples,
+        num_bootstrap_samples=num_bootstrap_samples,
+        confidence_level=confidence_level,
+    )
+
+    gen_sobol_i = computed_indices["gen_sobol_i"].ravel()
+    gen_sobol_total_i = computed_indices["gen_sobol_total_i"].ravel()
+    upper_bound_first_order = computed_indices["CI_gen_sobol_i"][:, 1]
+    upper_bound_total_order = computed_indices["CI_gen_sobol_total_i"][:, 1]
+
+    std_bootstrap_first_order = (upper_bound_first_order - gen_sobol_i) / delta
+    std_bootstrap_total_order = (upper_bound_total_order - gen_sobol_total_i) / delta
+
+    return std_bootstrap_first_order**2, std_bootstrap_total_order**2
+
+
+@pytest.fixture()
+def model_eval_generalised_sobol_index_variance():
+
+    """
+    For computational efficiency, the variance of the generalised Sobol indices
+    is precomputed using repeated model evaluations with
+    NUM_SAMPLES (num_repetitions=500, num_samples=20_000)
+
+    Copy-paste the following code to generate the variance
+    of the Sobol indices:
+
+        model = PythonModel(model_script="multioutput.py",
+                            model_object_name="evaluate",
+                            var_names=["X_1", "X_2"],
+                            delete_files=True)
+
+        runmodel_obj = RunModel(model=model)
+
+        dist_object_1 = JointIndependent([Normal(0, 1)] * 2)
+
+        SA = GeneralisedSobol(runmodel_obj, dist_object_1)
+
+        np.random.seed(12345)  # for reproducibility
+
+        num_repetitions, n_samples = 500, 20_000
+
+        num_vars = 2
+
+        bootstrap_first_order = np.zeros((num_vars, num_repetitions))
+        bootstrap_total_order = np.zeros((num_vars, num_repetitions))
+
+        for b in range(num_repetitions):
+
+            computed_indices = SA.run(n_samples=n_samples)
+
+            bootstrap_first_order[:, b] = computed_indices["gen_sobol_i"].ravel()
+            bootstrap_total_order[:, b] = computed_indices["gen_sobol_total_i"].ravel()
+
+        var_bootstrap_gen_S = np.var(bootstrap_first_order, axis=1, ddof=1)
+        var_bootstrap_gen_S_T = np.var(bootstrap_total_order, axis=1, ddof=1)
+
+        print(var_bootstrap_gen_S)
+        print(var_bootstrap_gen_S_T)
+
+    """
+
+    variance_first_order = np.array([0.00011284, 0.00012608])
+
+    variance_total_order = np.array([0.00012448, 0.00011208])
+
+    return variance_first_order, variance_total_order
+
+
+# Unit tests
+###############################################################################
+
+
+def test_pick_and_freeze_estimator(
+    pick_and_freeze_toy_GSI_normal,
+    analytical_toy_GSI_normal,
+    pick_and_freeze_toy_GSI_uniform,
+    analytical_toy_GSI_uniform,
+):
+    """
+    Test the pick and freeze estimator.
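+
+    The Pick-and-Freeze estimates for both the normal and the uniform
+    input distributions are compared with the analytical indices up to
+    2 decimal places (rtol=0, atol=1e-2 in np.isclose), as in the
+    assertions below.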
+ + """ + + # Prepare + N_true = analytical_toy_GSI_normal + N_estimate = pick_and_freeze_toy_GSI_normal + + U_true = analytical_toy_GSI_uniform + U_estimate = pick_and_freeze_toy_GSI_uniform + + # Act + # Idea: Measure accuracy upto 2 decimal places -> rtol=0, atol=1e-2 + assert np.isclose(N_estimate, N_true, rtol=0, atol=1e-2).all() + assert np.isclose(U_estimate, U_true, rtol=0, atol=1e-2).all() + + +def test_bootstrap_variance_computation( + model_eval_generalised_sobol_index_variance, + bootstrap_generalised_sobol_index_variance, +): + + """Test the bootstrap variance computation.""" + + # Prepare + var_first, var_total = model_eval_generalised_sobol_index_variance + boot_var_first, boot_var_total = bootstrap_generalised_sobol_index_variance + + # Act + assert var_first.shape == boot_var_first.shape + + # Idea: Ensure bootstrap variance and MC variance are of same order -> rtol=0, atol=1e-4 + assert np.isclose(boot_var_first, var_first, rtol=0, atol=1e-4).all() + assert np.isclose(boot_var_total, var_total, rtol=0, atol=1e-4).all() From f15c7bb4cb6f88b1311698b3f1180806cdb0d67b Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 9 May 2022 03:25:12 +0200 Subject: [PATCH 26/88] Added examples generalised sobol sensitivity --- .../local_mechanical_oscillator_ODE.py | 60 ++++++++++++++ .../generalised_sobol/local_multioutput.py | 42 ++++++++++ ...alised_sobol_mechcanical_oscillator_ODE.py | 68 ++++++++++++++++ .../plot_generalised_sobol_multioutput.py | 78 +++++++++++++++++++ 4 files changed, 248 insertions(+) create mode 100644 docs/code/sensitivity/generalised_sobol/local_mechanical_oscillator_ODE.py create mode 100644 docs/code/sensitivity/generalised_sobol/local_multioutput.py create mode 100644 docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechcanical_oscillator_ODE.py create mode 100644 docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py diff --git a/docs/code/sensitivity/generalised_sobol/local_mechanical_oscillator_ODE.py b/docs/code/sensitivity/generalised_sobol/local_mechanical_oscillator_ODE.py new file mode 100644 index 000000000..13b28c9fa --- /dev/null +++ b/docs/code/sensitivity/generalised_sobol/local_mechanical_oscillator_ODE.py @@ -0,0 +1,60 @@ +""" + +Auxiliary file +============================================== + +""" + +import numpy as np +from scipy.integrate import solve_ivp + + +def mech_oscillator(input_parameters): + """ + We have the second order differential equation: + + .. math:: + + m \ddot{x} + c \dot{x} + k x = 0 + + with initial conditions: :math: `x(0) = \ell`, :math: `\dot{x}(0) = 0`. + + where, for example :math: `m \sim \mathcal{U}(10, 12)`, + :math: `c \sim \mathcal{U}(0.4, 0.8)` + :math: `k \sim \mathcal{U}(70, 90)` + :math: `\ell \sim \mathcal{U}(-1, -0.25)`. + + + References + ---------- + + .. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others . + Sensitivity analysis for multidimensional and functional outputs. + Electronic journal of statistics 2014; 8(1): 575-603. + + """ + + # unpack the input parameters + m, c, k, l = input_parameters[0] + + # intial conditions + x_0 = l + v_0 = 0 + + # time points + t_0 = 0 + t_f = 40 + dt = 0.05 + n_t = int((t_f - t_0) / dt) + T = np.linspace(t_0, t_f, n_t) + + def ODE(t, y): + """ + The ODE system. 
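+
+        In first-order form with y = (x, dx/dt), the system reads
+        dy1/dt = y2 and dy2/dt = -(k/m) y1 - (c/m) y2, which is
+        exactly the array returned below.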
+ """ + return np.array([y[1], -(k / m) * y[0] - (c / m) * y[1]]) + + # solve the ODE + sol = solve_ivp(ODE, [t_0, t_f], [x_0, v_0], method="RK45", t_eval=T) + + return sol.y[0] diff --git a/docs/code/sensitivity/generalised_sobol/local_multioutput.py b/docs/code/sensitivity/generalised_sobol/local_multioutput.py new file mode 100644 index 000000000..6974d37c0 --- /dev/null +++ b/docs/code/sensitivity/generalised_sobol/local_multioutput.py @@ -0,0 +1,42 @@ +"""" +This is the toy example with multiple outputs from [1]_. + +References +---------- + +.. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others. + Sensitivity analysis for multidimensional and functional outputs. + Electronic journal of statistics 2014; 8(1): 575-603. + +""" + +import numpy as np + + +def evaluate(X): + + """ + + * **Input:** + + * **X** (`ndarray`): + Samples from the input distribution. + Shape: (n_samples, 2) + + * **Output:** + + * **Y** (`ndarray`): + Model evaluations. + Shape: (2, n_samples) + + """ + + n_samples = X.shape[0] + + output = np.zeros((2, n_samples)) + + output[0, :] = X[:, 0] + X[:, 1] + X[:, 0] * X[:, 1] + + output[1, :] = 2 * X[:, 0] + X[:, 1] + 3 * X[:, 0] * X[:, 1] + + return output diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechcanical_oscillator_ODE.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechcanical_oscillator_ODE.py new file mode 100644 index 000000000..62a19d96d --- /dev/null +++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechcanical_oscillator_ODE.py @@ -0,0 +1,68 @@ +r""" + +Mechanical oscillator model (multioutput) +============================================== + +The mechanical oscillator is governed by the following second-order ODE: + +.. math:: + m \ddot{x} + c \dot{x} + k x = 0 + +.. math:: + x(0) = \ell, \dot{x}(0) = 0. + +The parameteres of the oscillator are modeled as follows: + +.. math:: + m \sim \mathcal{U}(10, 12), c \sim \mathcal{U}(0.4, 0.8), k \sim \mathcal{U}(70, 90), \ell \sim \mathcal{U}(-1, -0.25). 
+ +""" + +# %% +import numpy as np + +from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.PythonModel import PythonModel +from UQpy.distributions import Uniform, Normal +from UQpy.distributions.collection.JointIndependent import JointIndependent +from UQpy.sensitivity.generalised_sobol import GeneralisedSobol + +# %% +# Create Model object +model = PythonModel( + model_script="local_mechanical_oscillator_ODE.py", + model_object_name="mech_oscillator", + var_names=[r"$m$", "$c$", "$k$", "$\ell$"], + delete_files=True, +) + +runmodel_obj = RunModel(model=model) + +# Define distribution object +M = Uniform(10, (12 - 10)) +C = Uniform(0.4, (0.8 - 0.4)) +K = Uniform(70, (90 - 70)) +L = Uniform(-1, (-0.25 - -1)) +dist_object = JointIndependent([M, C, K, L]) + +# %% +SA = GeneralisedSobol(runmodel_obj, dist_object) + +computed_indices = SA.run(n_samples=500) + +# %% [markdown] +# Expected generalised Sobol indices: +# +# $GS_{m}$ = 0.0826 +# +# $GS_{c}$ = 0.0020 +# +# $GS_{k}$ = 0.2068 +# +# $GS_{\ell}$ = 0.0561 + +# %% +computed_indices["gen_sobol_i"] + +# %% +computed_indices["gen_sobol_total_i"] diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py new file mode 100644 index 000000000..d89cfec12 --- /dev/null +++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py @@ -0,0 +1,78 @@ +r""" + +Toy multioutput function +============================================== + +.. math:: + Y = f (X_{1}, X_{2}) := \left(\begin{array}{c} + X_{1}+X_{2}+X_{1} X_{2} \\ + 2 X_{1}+3 X_{1} X_{2}+X_{2} + \end{array}\right) + +.. math:: + \text{case 1: } X_1, X_2 \sim \mathcal{U}(0, 1) + +.. math:: + \text{case 2: } X_1, X_2 \sim \mathcal{N}(0, 1) + +""" + +# %% +from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.PythonModel import PythonModel +from UQpy.distributions import Uniform, Normal +from UQpy.distributions.collection.JointIndependent import JointIndependent +from UQpy.sensitivity.generalised_sobol import GeneralisedSobol + +# %% +# Create Model object +model = PythonModel( + model_script="local_multioutput.py", + model_object_name="evaluate", + var_names=[r"X_1$", r"X_2"], + delete_files=True, +) + +runmodel_obj = RunModel(model=model) + +# Define distribution object +dist_object_1 = JointIndependent([Normal(0, 1)] * 2) +dist_object_2 = JointIndependent([Uniform(0, 1)] * 2) + +# %% +SA = GeneralisedSobol(runmodel_obj, dist_object_1) + +computed_indices = SA.run( + n_samples=20_000, confidence_level=0.95, num_bootstrap_samples=5_00 +) + +# %% [markdown] +# Gaussian case +# +# $S_1$ = 0.2941 +# +# $S_2$ = 0.1179 + +# %% +computed_indices["gen_sobol_i"] + +# %% +computed_indices["gen_sobol_total_i"] + +# %% +SA = GeneralisedSobol(runmodel_obj, dist_object_2) + +computed_indices = SA.run(n_samples=100_000) + +# %% [markdown] +# Gaussian case +# +# $S_1$ = 0.6084 +# +# $S_2$ = 0.3566 + +# %% +computed_indices["gen_sobol_i"] + +# %% +computed_indices["gen_sobol_total_i"] From 21c02896e2ca5947470f95dc160bb2ca82e7ee85 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 9 May 2022 17:29:37 +0200 Subject: [PATCH 27/88] Improved documentation for Chatterjee indices + Fomatting + Detailed descriptions --- docs/code/sensitivity/chatterjee/README.rst | 16 +++++--- .../chatterjee/plot_chatterjee_exponential.py | 26 +++++++++---- .../chatterjee/plot_chatterjee_ishigami.py | 28 ++++++++++++-- 
.../chatterjee/plot_chatterjee_sobol_func.py | 38 ++++++++++++++----- .../sensitivity/sobol/plot_sobol_ishigami.py | 14 ++++++- docs/source/sensitivity/chatterjee.rst | 2 +- 6 files changed, 97 insertions(+), 27 deletions(-) diff --git a/docs/code/sensitivity/chatterjee/README.rst b/docs/code/sensitivity/chatterjee/README.rst index 590eee2a7..540581862 100644 --- a/docs/code/sensitivity/chatterjee/README.rst +++ b/docs/code/sensitivity/chatterjee/README.rst @@ -2,14 +2,18 @@ Chatterjee indices ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ These examples serve as a guide for using the Chatterjee sensitivity module. They have been taken from various papers to enable validation of the implementation and have been referenced accordingly. -1. Ishigami function +1. **Ishigami function** -2. Exponential function + In addition to the Pick and Freeze scheme, the Sobol indices can be estimated using the rank statistics approach [2]_. We demonstrate this estimation of the Sobol indices using the Ishigami function. -For the Exponential model, analytical Cramer-von Mises indices are available, since they are equivalent to the Chatterjee indices, they are shown here. +2. **Exponential function** -3. Sobol function + For the Exponential model, analytical CramĆ©r-von Mises indices are available [1]_ and since they are equivalent to the Chatterjee indices in the sample limit, they are shown here. -This example was considered in [1] page 18. +3. **Sobol function** -.. [1] Fabrice Gamboa, Pierre Gremaud, Thierry Klein, and AgnĆØs Lagnoux. (2020). Global Sensitivity Analysis: a new generation of mighty estimators based on rank statistics. \ No newline at end of file + This example was considered in [2]_ (page 18) to compare the Pick and Freeze scheme with the rank statistics approach for estimating the Sobol indices. + +.. [1] Gamboa, F., Klein, T., & Lagnoux, A. (2018). Sensitivity Analysis Based on CramĆ©r-von Mises Distance. SIAM/ASA Journal on Uncertainty Quantification, 6(2), 522-548. doi:10.1137/15M1025621. (`Link `_) + +.. [2] Fabrice Gamboa, Pierre Gremaud, Thierry Klein, and AgnĆØs Lagnoux. (2020). Global Sensitivity Analysis: a new generation of mighty estimators based on rank statistics. diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py index 2922b97af..81d752653 100644 --- a/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py +++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py @@ -3,9 +3,17 @@ Exponential function ============================================== +The exponential function was used in [1]_ to demonstrate the +CramĆ©r-von Mises indices. Chattererjee indices approach the CramĆ©r-von Mises +indices in the sample limit and will be demonstrated via this example. + .. math:: f(x) := \exp(x_1 + 2x_2), \quad x_1, x_2 \sim \mathcal{N}(0, 1) +.. [1] Gamboa, F., Klein, T., & Lagnoux, A. (2018). Sensitivity Analysis Based on \ +CramĆ©r-von Mises Distance. SIAM/ASA Journal on Uncertainty Quantification, 6(2), \ +522-548. doi:10.1137/15M1025621. 
(`Link `_) + """ # %% @@ -15,7 +23,9 @@ from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.sensitivity.chatterjee import Chatterjee -# %% +# %% [markdown] +# **Define the model and input distributions** + # Create Model object model = PythonModel( model_script="local_exponential.py", @@ -33,22 +43,24 @@ dist_object = JointIndependent([Normal(0, 1)] * 2) # %% [markdown] -# Compute Chatterjee indices +# **Compute Chatterjee indices** -# %% +# %% [markdown] SA = Chatterjee(runmodel_obj, dist_object) -# Compute Sobol indices using the pick and freeze algorithm +# Compute Chatterjee indices using the pick and freeze algorithm computed_indices = SA.run(n_samples=1_000_000) # %% [markdown] -# Cramer-von Mises sensitivity analysis +# **Chattererjee indices** +# +# Chattererjee indices approach the CramĆ©r-von Mises indices in the sample limit. # # Expected value of the sensitivity indices: # -# $S^1_{CVM} = \frac{6}{\pi} \operatorname{arctan}(2) - 2 \approx 0.1145$ +# :math:`S^1_{CVM} = \frac{6}{\pi} \operatorname{arctan}(2) - 2 \approx 0.1145` # -# $S^2_{CVM} = \frac{6}{\pi} \operatorname{arctan}(\sqrt{19}) - 2 \approx 0.5693$ +# :math:`S^2_{CVM} = \frac{6}{\pi} \operatorname{arctan}(\sqrt{19}) - 2 \approx 0.5693` # %% computed_indices["chatterjee_i"] diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py index 66897d670..d3759fc10 100644 --- a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py +++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py @@ -3,6 +3,9 @@ Ishigami function ============================================== +The ishigami function is a non-linear, non-monotonic function that is commonly used to +benchmark uncertainty and senstivity analysis methods. + .. math:: f(x_1, x_2, x_3) = sin(x_1) + a \cdot sin^2(x_2) + b \cdot x_3^4 sin(x_1) @@ -20,7 +23,9 @@ from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.sensitivity.chatterjee import Chatterjee -# %% +# %% [markdown] +# **Define the model and input distributions** + # Create Model object model = PythonModel( model_script="local_ishigami.py", @@ -36,9 +41,9 @@ dist_object = JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3) # %% [markdown] -# Compute Chatterjee indices +# **Compute Chatterjee indices** -# %% +# %% [markdown] SA = Chatterjee(runmodel_obj, dist_object) computed_indices = SA.run( @@ -48,11 +53,28 @@ confidence_level=0.95, ) +# %% [markdown] +# **Chattererjee indices** + # %% computed_indices["chatterjee_i"] +# %% [markdown] +# **Confidence intervals for the Chatterjee indices** + # %% computed_indices["CI_chatterjee_i"] +# %% [markdown] +# **Estimated Sobol indices** +# +# Expected first order Sobol indices: +# +# :math:`S_1`: 0.3139 +# +# :math:`S_2`: 0.4424 +# +# :math:`S_3`: 0.0 + # %% computed_indices["sobol_i"] diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py index 578131426..0169597e1 100644 --- a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py +++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py @@ -3,6 +3,15 @@ Sobol function ============================================== +The Sobol function is non-linear function that is commonly used to benchmark uncertainty +and senstivity analysis methods. Unlike the ishigami function which has 3 input +variables, the Sobol function can have any number of input variables. 
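+
+For reference, the first order Sobol indices of this function are known in
+closed form (a standard result for this test function, assuming
+:math:`x_i \sim \mathcal{U}(0, 1)`):
+
+.. math::
+    S_i = \frac{V_i}{\prod_{j=1}^{D}\left(1+V_j\right)-1}, \quad
+    V_i = \frac{1}{3\left(1+a_i\right)^{2}}.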
+ +This function was used in [1]_ to compare the Pick and Freeze approach and the rank +statistics approach to estimating Sobol indices. The rank statistics approach was +observed to be more accurate than the Pick and Freeze approach and it also provides +better estimates when only a small number of model evaluations are available. + .. math:: g(x_1, x_2, \ldots, x_D) := \prod_{i=1}^{D} \frac{|4x_i - 2| + a_i}{1 + a_i}, @@ -12,6 +21,8 @@ .. math:: x_i \sim \mathcal{U}(0, 1), \quad a_i \in \mathbb{R}. +.. [1] Fabrice Gamboa, Pierre Gremaud, Thierry Klein, and AgnĆØs Lagnoux. (2020). Global Sensitivity Analysis: a new generation of mighty estimators based on rank statistics. + """ # %% @@ -23,7 +34,9 @@ from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.sensitivity.chatterjee import Chatterjee -# %% +# %% [markdown] +# **Define the model and input distributions** + # Create Model object num_vars = 6 a_vals = np.array([0.0, 0.5, 3.0, 9.0, 99.0, 99.0]) @@ -42,29 +55,36 @@ dist_object = JointIndependent([Uniform(0, 1)] * num_vars) # %% [markdown] -# Compute Chatterjee indices +# **Compute Chatterjee indices** -# %% +# %% [markdown] SA = Chatterjee(runmodel_obj, dist_object) # Compute Sobol indices using the pick and freeze algorithm computed_indices = SA.run(n_samples=500_000, estimate_sobol_indices=True) +# %% [markdown] +# **Chatterjee indices** + # %% computed_indices["chatterjee_i"] # %% [markdown] -# $S_1$ = 5.86781190e-01 +# **Estimated Sobol indices** +# +# Expected first order Sobol indices: +# +# :math:`S_1` = 5.86781190e-01 # -# $S_2$ = 2.60791640e-01 +# :math:`S_2` = 2.60791640e-01 # -# $S_3$ = 3.66738244e-02 +# :math:`S_3` = 3.66738244e-02 # -# $S_4$ = 5.86781190e-03 +# :math:`S_4` = 5.86781190e-03 # -# $S_5$ = 5.86781190e-05 +# :math:`S_5` = 5.86781190e-05 # -# $S_6$ = 5.86781190e-05 +# :math:`S_6` = 5.86781190e-05 # %% computed_indices["sobol_i"] diff --git a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py index a448a61b4..f067aa4b3 100644 --- a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py +++ b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py @@ -3,6 +3,9 @@ Ishigami function ============================================== +The ishigami function is a non-linear, non-monotonic function that is commonly used in +uncertainty and senstivity analysis methods. + .. 
math:: f(x_1, x_2, x_3) = sin(x_1) + a \cdot sin^2(x_2) + b \cdot x_3^4 sin(x_1) @@ -44,6 +47,9 @@ from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.sensitivity.sobol import Sobol +# %% [markdown] +# **Define the model and input distributions** + # %% # Create Model object model = PythonModel( @@ -59,12 +65,17 @@ # Define distribution object dist_object = JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3) +# %% [markdown] +# **Compute Sobol indices** + # %% SA = Sobol(runmodel_obj, dist_object) computed_indices = SA.run(n_samples=100_000, num_bootstrap_samples=100) # %% [markdown] +# **Sobol indices** +# # Expected first order Sobol indices: # # X1: 0.3139 @@ -72,12 +83,13 @@ # X2: 0.4424 # # X3: 0.0 -# # %% computed_indices["sobol_i"] # %% [markdown] +# **Total order Sobol indices** +# # Expected total order Sobol indices: # # X1: 0.55758886 diff --git a/docs/source/sensitivity/chatterjee.rst b/docs/source/sensitivity/chatterjee.rst index 88f77889b..e2e8ce006 100644 --- a/docs/source/sensitivity/chatterjee.rst +++ b/docs/source/sensitivity/chatterjee.rst @@ -9,7 +9,7 @@ Consider :math:`n` samples of random variables :math:`X` and :math:`Y`, with :ma \xi_{n}(X, Y):=1-\frac{3 \sum_{i=1}^{n-1}\left|r_{i+1}-r_{i}\right|}{n^{2}-1} -The Chatterjee index converges for :math:`n \rightarrow \infty` to the CramĆ©r-von Mises index and is faster to estimate than using the Pick and Freeze approach in the Cramer-von Mises index. +The Chatterjee index converges for :math:`n \rightarrow \infty` to the CramĆ©r-von Mises index and is faster to estimate than using the Pick and Freeze approach in the CramĆ©r-von Mises index. .. [1] Sourav Chatterjee (2021) A New Coefficient of Correlation, Journal of the American Statistical Association, 116:536, 2009-2022, DOI: 10.1080/01621459.2020.1758115 (`Link `_) From 86dcd1aacc8a40869247f2866cde36814f25cf77 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 9 May 2022 18:05:44 +0200 Subject: [PATCH 28/88] Improved documentation for CramervonMises indices + Fomatting + Detailed descriptions --- .../sensitivity/cramer_von_mises/README.rst | 10 ++++++ .../cramer_von_mises/plot_cvm_exponential.py | 32 +++++++++++++++---- .../cramer_von_mises/plot_cvm_sobol_func.py | 31 ++++++++++++------ docs/source/sensitivity/cramer_von_mises.rst | 2 +- 4 files changed, 59 insertions(+), 16 deletions(-) diff --git a/docs/code/sensitivity/cramer_von_mises/README.rst b/docs/code/sensitivity/cramer_von_mises/README.rst index ea5f804b6..b87758792 100644 --- a/docs/code/sensitivity/cramer_von_mises/README.rst +++ b/docs/code/sensitivity/cramer_von_mises/README.rst @@ -1,3 +1,13 @@ CramĆ©r-von Mises Sensitivity indices ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +These examples serve as a guide for using the CramĆ©r-von Mises sensitivity module. They have been taken from various papers to enable validation of the implementation and have been referenced accordingly. +1. **Exponential function** + + For the Exponential model, analytical CramĆ©r-von Mises indices are available [1]_. + +2. **Sobol function** + + The CramĆ©r-von Mises indices are computed using the Pick and Freeze approach [1]_. These model evaluations can be used to estimate the Sobol indices as well. We demonstrate this using the Sobol function. + +.. [1] Gamboa, F., Klein, T., & Lagnoux, A. (2018). Sensitivity Analysis Based on CramĆ©r-von Mises Distance. SIAM/ASA Journal on Uncertainty Quantification, 6(2), 522-548. doi:10.1137/15M1025621. 
(`Link `_) \ No newline at end of file diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py index 244fd7805..e6949a71b 100644 --- a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py +++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py @@ -3,9 +3,16 @@ Exponential function ============================================== +The exponential function was used in [1]_ to demonstrate the +CramĆ©r-von Mises indices. + .. math:: f(x) := \exp(x_1 + 2x_2), \quad x_1, x_2 \sim \mathcal{N}(0, 1) +.. [1] Gamboa, F., Klein, T., & Lagnoux, A. (2018). Sensitivity Analysis Based on \ +CramĆ©r-von Mises Distance. SIAM/ASA Journal on Uncertainty Quantification, 6(2), \ +522-548. doi:10.1137/15M1025621. (`Link `_) + """ # %% @@ -15,7 +22,9 @@ from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.sensitivity.cramer_von_mises import CramervonMises as cvm -# %% +# %% [markdown] +# **Define the model and input distributions** + # Create Model object model = PythonModel( model_script="local_exponential.py", @@ -30,29 +39,40 @@ dist_object = JointIndependent([Normal(0, 1)] * 2) # %% [markdown] -# Compute Cramer-von Mises indices +# **Compute CramĆ©r-von Mises indices** # %% -# create cvm object SA = cvm(runmodel_obj, dist_object) # Compute Sobol indices using the pick and freeze algorithm computed_indices = SA.run(n_samples=20_000, estimate_sobol_indices=True) # %% [markdown] -# Cramer-von Mises sensitivity analysis +# **CramĆ©r-von Mises indices** # # Expected value of the sensitivity indices: # -# $S^1_{CVM} = \frac{6}{\pi} \operatorname{arctan}(2) - 2 \approx 0.1145$ +# :math:`S^1_{CVM} = \frac{6}{\pi} \operatorname{arctan}(2) - 2 \approx 0.1145` # -# $S^2_{CVM} = \frac{6}{\pi} \operatorname{arctan}(\sqrt{19}) - 2 \approx 0.5693$ +# :math:`S^2_{CVM} = \frac{6}{\pi} \operatorname{arctan}(\sqrt{19}) - 2 \approx 0.5693` # %% computed_indices["CVM_i"] +# %% [markdown] +# **Estimated first order Sobol indices** +# +# Expected first order Sobol indices: +# +# :math:`S_1` = 0.0118 +# +# :math:`S_2` = 0.3738 + # %% computed_indices["sobol_i"] +# %% [markdown] +# **Estimated total order Sobol indices** + # %% computed_indices["sobol_total_i"] diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py index da17e3e2f..ff86ab30e 100644 --- a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py +++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py @@ -3,6 +3,10 @@ Sobol function ============================================== +The Sobol function is non-linear function that is commonly used to benchmark uncertainty +and senstivity analysis methods. Unlike the ishigami function which has 3 input +variables, the Sobol function can have any number of input variables. + .. 
math:: g(x_1, x_2, \ldots, x_D) := \prod_{i=1}^{D} \frac{|4x_i - 2| + a_i}{1 + a_i}, @@ -23,10 +27,12 @@ from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.sensitivity.cramer_von_mises import CramervonMises as cvm -# %% +# %% [markdown] +# **Define the model and input distributions** + # Create Model object num_vars = 6 -a_vals = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]) +a_vals = np.array([0.0, 0.5, 3.0, 9.0, 99.0, 99.0]) model = PythonModel( model_script="local_sobol_func.py", @@ -41,30 +47,37 @@ # Define distribution object dist_object = JointIndependent([Uniform(0, 1)] * num_vars) +# %% [markdown] +# **Compute CramĆ©r-von Mises indices** + # %% SA = cvm(runmodel_obj, dist_object) # Compute Sobol indices using the pick and freeze algorithm computed_indices = SA.run(n_samples=20_000, estimate_sobol_indices=True) +# %% [markdown] +# **CramĆ©r-von Mises indices** + # %% computed_indices["CVM_i"] # %% [markdown] -# Sobol indices computed analytically +# **Estimated Sobol indices** # -# $S_1$ = 0.46067666 +# Expected first order Sobol indices: # -# $S_2$ = 0.20474518 +# :math:`S_1` = 5.86781190e-01 # -# $S_3$ = 0.11516917 +# :math:`S_2` = 2.60791640e-01 # -# $S_4$ = 0.07370827 +# :math:`S_3` = 3.66738244e-02 # -# $S_5$ = 0.0511863 +# :math:`S_4` = 5.86781190e-03 # -# $S_6$ = 0.03760626 +# :math:`S_5` = 5.86781190e-05 # +# :math:`S_6` = 5.86781190e-05 # %% computed_indices["sobol_i"] diff --git a/docs/source/sensitivity/cramer_von_mises.rst b/docs/source/sensitivity/cramer_von_mises.rst index ccd412af9..1958c128a 100644 --- a/docs/source/sensitivity/cramer_von_mises.rst +++ b/docs/source/sensitivity/cramer_von_mises.rst @@ -1,7 +1,7 @@ CramĆ©r-von Mises indices ---------------------------------------- -A sensitivity index based on the CramĆ©r-von Mises distance. In contrast to variance based Sobol indices it takes into account the whole distribution of the model output and is therefore considered as a moment-free method [1]_. Furthermore the index can be naturally extended to multivariate model outputs (not implemented yet in UQPy). +A sensitivity index based on the CramĆ©r-von Mises distance. In contrast to the variance based Sobol indices, it takes into account the whole distribution of the model output and is therefore considered as a moment-free method [1]_. Furthermore the index can be naturally extended to multivariate model outputs (not implemented yet in UQPy). Consider a model :math:`Y=f(X): \mathbb{R}^d \rightarrow \mathbb{R}^k` with :math:`d` inputs :math:`X_{(1)}, X_{(2)}, \ldots, X_{(d)}` and :math:`k` outputs :math:`Y_{(1)}, Y_{(2)}, \ldots, Y_{(k)}`. 
We define the cumulative distribution function :math:`F(t)` of :math:`Y` as: From d48552cddd22fc0181baa1135e83fba27c920873 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 9 May 2022 18:47:11 +0200 Subject: [PATCH 29/88] Improved documentation GeneralisedSobol indices + Fomatting + Detailed descriptions --- .../sensitivity/generalised_sobol/README.rst | 14 ++++--- ...alised_sobol_mechanical_oscillator_ODE.py} | 26 ++++++++++--- .../plot_generalised_sobol_multioutput.py | 39 +++++++++++++------ docs/source/sensitivity/generalised_sobol.rst | 17 ++++++-- 4 files changed, 70 insertions(+), 26 deletions(-) rename docs/code/sensitivity/generalised_sobol/{plot_generalised_sobol_mechcanical_oscillator_ODE.py => plot_generalised_sobol_mechanical_oscillator_ODE.py} (71%) diff --git a/docs/code/sensitivity/generalised_sobol/README.rst b/docs/code/sensitivity/generalised_sobol/README.rst index 88a5bec5e..78ede7984 100644 --- a/docs/code/sensitivity/generalised_sobol/README.rst +++ b/docs/code/sensitivity/generalised_sobol/README.rst @@ -1,12 +1,14 @@ Generalised Sobol Sensitivity indices ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -We demonstrate the computation of GSI for 2 examples with multiple outputs: +These examples serve as a guide for using the GSI sensitivity module. They have been taken from various papers to enable validation of the implementation and have been referenced accordingly. -1. Mechanical oscillator (analytical solution): Example from [1] page 2 -2. Mechanical oscillator ODE (numerical solution): Example from [2] page 19 -3. Toy example (analytical solution): Example from [2] +1. **Mechanical oscillator ODE** -.. [1] Alexanderian, Alen, Gremaud, Pierre A and Smith, Ralph C. Variance-based sensitivity analysis for time-dependent processes. + The GSI sensitivity indices are computed for a mechanical oscillator governed by a second-order differential equation [1]_. The model outputs the displacement of the oscillator for a given time period. Unlike the pointwise-in-time Sobol indices, which provide the sensitivity of the model parameters at each point in time, the GSI indices summarise the sensitivities of the model parameters over the entire time period. -.. [2] Gamboa F, Janon A, Klein T, Lagnoux A, others. Sensitivity analysis for multidimensional and functional outputs. Electronic journal of statistics 2014; 8(1): 575-603. \ No newline at end of file +2. **Toy example** + + The GSI sensitivity indices are computed for a toy model whose analytical solution is given in [1]_. + +.. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others. Sensitivity analysis for multidimensional and functional outputs. Electronic journal of statistics 2014; 8(1): 575-603. \ No newline at end of file diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechcanical_oscillator_ODE.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py similarity index 71% rename from docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechcanical_oscillator_ODE.py rename to docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py index 62a19d96d..361bde6aa 100644 --- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechcanical_oscillator_ODE.py +++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py @@ -16,6 +16,10 @@ .. math:: m \sim \mathcal{U}(10, 12), c \sim \mathcal{U}(0.4, 0.8), k \sim \mathcal{U}(70, 90), \ell \sim \mathcal{U}(-1, -0.25). 
+Unlike the pointwise-in-time Sobol indices, which provide the sensitivity of the model +parameters at each point in time, the GSI indices summarise the sensitivities of the +model parameters over the entire time period. + """ # %% @@ -27,7 +31,9 @@ from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.sensitivity.generalised_sobol import GeneralisedSobol -# %% +# %% [markdown] +# **Define the model and input distributions** + # Create Model object model = PythonModel( model_script="local_mechanical_oscillator_ODE.py", @@ -45,24 +51,32 @@ L = Uniform(-1, (-0.25 - -1)) dist_object = JointIndependent([M, C, K, L]) -# %% +# %% [markdown] +# **Compute generalised Sobol indices** + +# %% [markdown] SA = GeneralisedSobol(runmodel_obj, dist_object) computed_indices = SA.run(n_samples=500) # %% [markdown] +# **First order Generalised Sobol indices** +# # Expected generalised Sobol indices: # -# $GS_{m}$ = 0.0826 +# :math:`GS_{m}` = 0.0826 # -# $GS_{c}$ = 0.0020 +# :math:`GS_{c}` = 0.0020 # -# $GS_{k}$ = 0.2068 +# :math:`GS_{k}` = 0.2068 # -# $GS_{\ell}$ = 0.0561 +# :math:`GS_{\ell}` = 0.0561 # %% computed_indices["gen_sobol_i"] +# %% [markdown] +# **Total order Generalised Sobol indices** + # %% computed_indices["gen_sobol_total_i"] diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py index d89cfec12..1b673ddcf 100644 --- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py +++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py @@ -10,10 +10,10 @@ \end{array}\right) .. math:: - \text{case 1: } X_1, X_2 \sim \mathcal{U}(0, 1) + \text{case 1: } X_1, X_2 \sim \mathcal{N}(0, 1) .. 
math:: - \text{case 2: } X_1, X_2 \sim \mathcal{N}(0, 1) + \text{case 2: } X_1, X_2 \sim \mathcal{U}(0, 1) """ @@ -24,7 +24,9 @@ from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.sensitivity.generalised_sobol import GeneralisedSobol -# %% +# %% [markdown] +# **Define the model and input distributions** + # Create Model object model = PythonModel( model_script="local_multioutput.py", @@ -37,9 +39,11 @@ # Define distribution object dist_object_1 = JointIndependent([Normal(0, 1)] * 2) -dist_object_2 = JointIndependent([Uniform(0, 1)] * 2) -# %% +# %% [markdown] +# **Compute generalised Sobol indices** + +# %% [markdown] SA = GeneralisedSobol(runmodel_obj, dist_object_1) computed_indices = SA.run( @@ -47,11 +51,15 @@ ) # %% [markdown] +# **First order Generalised Sobol indices** +# +# Expected generalised Sobol indices: +# # Gaussian case # -# $S_1$ = 0.2941 +# :math:`GS_1` = 0.2941 # -# $S_2$ = 0.1179 +# :math:`GS_2` = 0.1179 # %% computed_indices["gen_sobol_i"] @@ -59,17 +67,26 @@ # %% computed_indices["gen_sobol_total_i"] -# %% +# %% [markdown] +# **Compute generalised Sobol indices** + +# %% [markdown] +dist_object_2 = JointIndependent([Uniform(0, 1)] * 2) + SA = GeneralisedSobol(runmodel_obj, dist_object_2) computed_indices = SA.run(n_samples=100_000) # %% [markdown] -# Gaussian case +# **First order Generalised Sobol indices** +# +# Expected generalised Sobol indices: +# +# Uniform case # -# $S_1$ = 0.6084 +# :math:`GS_1` = 0.6084 # -# $S_2$ = 0.3566 +# :math:`GS_2` = 0.3566 # %% computed_indices["gen_sobol_i"] diff --git a/docs/source/sensitivity/generalised_sobol.rst b/docs/source/sensitivity/generalised_sobol.rst index 3515a744b..402b3190e 100644 --- a/docs/source/sensitivity/generalised_sobol.rst +++ b/docs/source/sensitivity/generalised_sobol.rst @@ -11,16 +11,27 @@ As the inputs :math:`X_{1}, \ldots, X_{d}` are independent, :math:`f` may be dec f(X) = c + f_{\mathbf{u}}\left(X_{\mathbf{u}}\right)+f_{\sim \mathbf{u}}\left(X_{\sim \mathbf{u}}\right) + f_{\mathbf{u}, \sim \mathbf{u}}\left(X_{\mathbf{u}}, X_{\sim \mathbf{u}}\right) where :math:`c \in \mathbb{R}^{k}, f_{\mathbf{u}}: E_{\mathbf{u}} \rightarrow \mathbb{R}^{k}, f_{\sim \mathbf{u}}: E_{\sim \mathbf{u}} \rightarrow \mathbb{R}^{k}` and :math:`f_{\mathbf{u}, \sim \mathbf{u}}: E \rightarrow \mathbb{R}^{k}` are given by -:math:`c=\mathbb{E}(Y), f_{\mathbf{u}}=\mathbb{E}\left(Y \mid X_{\mathbf{u}}\right)-c, f_{\sim \mathbf{u}}=\mathbb{E}\left(Y \mid X_{\sim \mathbf{u}}\right)-c, f_{u, \sim \mathbf{u}}=Y-f_{\mathbf{u}}-f_{\sim \mathbf{u}}-c` + +.. math:: + c = \mathbb{E}(Y), + +.. math:: + f_{\mathbf{u}}=\mathbb{E}\left(Y \mid X_{\mathbf{u}}\right)-c, + +.. math:: + f_{\sim \mathbf{u}}=\mathbb{E}\left(Y \mid X_{\sim \mathbf{u}}\right)-c, + +.. math:: + f_{u, \sim \mathbf{u}}=Y-f_{\mathbf{u}}-f_{\sim \mathbf{u}}-c. Thanks to :math:`L^{2}`-orthogonality, computing the covariance matrix of both sides of the above equation leads to .. math:: \Sigma = C_{\mathbf{u}}+C_{\sim \mathbf{u}}+C_{\mathbf{u}, \sim \mathbf{u}}. -Here, :math:`\Sigma, C_{\mathbf{u}}, C_{\sim \mathbf{u}}` and :math:`C_{\mathbf{u}, \sim \mathbf{u}}` are denoting respectively the covariance matrices of :math:`Y, f_{\mathbf{u}}\left(X_{\mathbf{u}}\right), f_{\sim \mathbf{u}}\left(X_{\sim \mathbf{u}}\right)` and :math:`f_{\mathbf{u}, \sim \mathbf{u}}\left(X_{\mathbf{u}}, X_{\sim \mathbf{u}}\right)`. 
+Here, :math:`\Sigma, C_{\mathbf{u}}, C_{\sim \mathbf{u}}` and :math:`C_{\mathbf{u}, \sim \mathbf{u}}` are denoting the covariance matrices of :math:`Y, f_{\mathbf{u}}\left(X_{\mathbf{u}}\right), f_{\sim \mathbf{u}}\left(X_{\sim \mathbf{u}}\right)` and :math:`f_{\mathbf{u}, \sim \mathbf{u}}\left(X_{\mathbf{u}}, X_{\sim \mathbf{u}}\right)` respectively. -The First order generalised Sobol indices can be computed using the Pick-and-Freeze approach as follows, where :math:`\mathbf{u}` is a variable :math:`i` of the independent random variables. +The first order generalised Sobol indices can be computed using the Pick-and-Freeze approach as follows, where :math:`\mathbf{u}` is a variable :math:`i` of the independent random variables. .. math:: S_{i, N}=\frac{\operatorname{Tr}\left(C_{i, N}\right)}{\operatorname{Tr}\left(\Sigma_{N}\right)} From d842ec67457ba1cc49df0eb005af160b7e2809f4 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 9 May 2022 23:14:18 +0200 Subject: [PATCH 30/88] Improved documentation Sobol indices + Fomatting + Detailed descriptions --- docs/code/sensitivity/sobol/README.rst | 23 ++++-- docs/code/sensitivity/sobol/local_additive.py | 21 +++++ .../sensitivity/sobol/local_exponential.py | 20 ----- .../sobol/plot_mechanical_oscillator_ODE.py | 16 +++- ..._exponential.py => plot_sobol_additive.py} | 36 ++++----- .../code/sensitivity/sobol/plot_sobol_func.py | 78 +++++++++++-------- .../sensitivity/sobol/plot_sobol_ishigami.py | 24 +++--- docs/source/sensitivity/sobol.rst | 14 ++-- 8 files changed, 132 insertions(+), 100 deletions(-) create mode 100644 docs/code/sensitivity/sobol/local_additive.py delete mode 100644 docs/code/sensitivity/sobol/local_exponential.py rename docs/code/sensitivity/sobol/{plot_sobol_exponential.py => plot_sobol_additive.py} (50%) diff --git a/docs/code/sensitivity/sobol/README.rst b/docs/code/sensitivity/sobol/README.rst index 1be801c21..c927409f5 100644 --- a/docs/code/sensitivity/sobol/README.rst +++ b/docs/code/sensitivity/sobol/README.rst @@ -7,18 +7,29 @@ Single output models ====================== We demonstrate the computation of the Sobol indices for models with a single output using the following examples: -1. Ishigami function -2. Exponential function -3. Sobol function with parameters a := [0., 0.5, 3., 9., 99., 99.] : Example from [2] page 11 +1. **Additive function** + + This is a beginner-friendly example for introducing Sobol indices. The function is a linear combination of two inputs which produces a scalar output. + +2. **Ishigami function** + + The Ishigami function is a non-linear, non-monotonic function that is commonly used to benchmark uncertainty and senstivity analysis methods. + +3. **Sobol function** + + The Sobol function is non-linear function that is commonly used to benchmark uncertainty + and senstivity analysis methods. Unlike the Ishigami function which has 3 input + variables, the Sobol function can have any number of input variables (see [2]_). Multiple output models ======================== We demonstrate the computation of the Sobol indices for models with multiple outputs using the following example: -1. Mechanical oscillator ODE (numerical model): Example from [1] page 19 +1. **Mechanical oscillator ODE** + The Sobol indices are computed for a mechanical oscillator governed by a second-order differential equation [1]_. The model outputs the displacement of the oscillator for a given time period. Here the sensitivity of the model parameters are computed at each point in time (see [1]_). 
-[1] Gamboa F, Janon A, Klein T, Lagnoux A, others. Sensitivity analysis for multidimensional and functional outputs. Electronic journal of statistics 2014; 8(1): 575-603. +.. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others. Sensitivity analysis for multidimensional and functional outputs. Electronic journal of statistics 2014; 8(1): 575-603. -[2] Saltelli, A. (2002). Making best use of model evaluations to compute indices. +.. [2] Saltelli, A. (2002). Making best use of model evaluations to compute indices. diff --git a/docs/code/sensitivity/sobol/local_additive.py b/docs/code/sensitivity/sobol/local_additive.py new file mode 100644 index 000000000..a0893fa11 --- /dev/null +++ b/docs/code/sensitivity/sobol/local_additive.py @@ -0,0 +1,21 @@ +""" + +Auxiliary file +============================================== + +""" + +import numpy as np + + +def evaluate(X, params) -> np.array: + r"""A linear function that is used to demonstrate sensitivity indices. + + .. math:: + f(x) = a \cdot x_1 + b \cdot x_2 + """ + a, b = params + + Y = a * X[:, 0] + b * X[:, 1] + + return Y diff --git a/docs/code/sensitivity/sobol/local_exponential.py b/docs/code/sensitivity/sobol/local_exponential.py deleted file mode 100644 index 1fd0ef0d9..000000000 --- a/docs/code/sensitivity/sobol/local_exponential.py +++ /dev/null @@ -1,20 +0,0 @@ -""" - -Auxiliary file -============================================== - -""" - -import numpy as np - - -def evaluate(X: np.array) -> np.array: - r"""A non-linear function that is used to demonstrate sensitivity index. - - .. math:: - f(x) = \exp(x_1 + 2*x_2) - """ - - Y = np.exp(X[:, 0] + 2 * X[:, 1]) - - return Y diff --git a/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py b/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py index 6e03332d0..05636321f 100644 --- a/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py +++ b/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py @@ -16,6 +16,10 @@ .. math:: m \sim \mathcal{U}(10, 12), c \sim \mathcal{U}(0.4, 0.8), k \sim \mathcal{U}(70, 90), \ell \sim \mathcal{U}(-1, -0.25). +Here, we compute the Sobol indices for each point in time and are called +pointwise-in-time Sobol indices. These indices describe the sensitivity of the model +parameters at each point in time. + """ # %% @@ -28,7 +32,9 @@ from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.sensitivity.sobol import Sobol -# %% +# %% [markdown] +# **Define the model and input distributions** + # Create Model object model = PythonModel( model_script="local_mechanical_oscillator_ODE.py", @@ -46,13 +52,17 @@ L = Uniform(-1, (-0.25 - -1)) dist_object = JointIndependent([M, C, K, L]) -# %% +# %% [markdown] +# **Compute Sobol indices** + +# %% [markdown] SA = Sobol(runmodel_obj, dist_object) computed_indices = SA.run(n_samples=500) # %% -# Plot the Sobol indices +# **Plot the Sobol indices** + t_0 = 0 t_f = 40 dt = 0.05 diff --git a/docs/code/sensitivity/sobol/plot_sobol_exponential.py b/docs/code/sensitivity/sobol/plot_sobol_additive.py similarity index 50% rename from docs/code/sensitivity/sobol/plot_sobol_exponential.py rename to docs/code/sensitivity/sobol/plot_sobol_additive.py index 81c9b78e9..4645ed749 100644 --- a/docs/code/sensitivity/sobol/plot_sobol_exponential.py +++ b/docs/code/sensitivity/sobol/plot_sobol_additive.py @@ -1,10 +1,10 @@ """ -Exponential function +Additive function ============================================== .. 
math:: - f(x) := \exp(x_1 + 2x_2), \quad x_1, x_2 \sim \mathcal{N}(0, 1) + f(x) = a \cdot X_1 + b \cdot X_2, \quad X_1, X_2 \sim \mathcal{N}(0, 1), \quad a,b \in \mathbb{R} """ @@ -15,16 +15,21 @@ from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.sensitivity.sobol import Sobol -# %% +# %% [markdown] +# **Define the model and input distributions** + # Create Model object +a, b = 1, 2 + model = PythonModel( - model_script="local_exponential.py", + model_script="local_additive.py", model_object_name="evaluate", var_names=[ "X_1", "X_2", ], delete_files=True, + params=[a, b], ) runmodel_obj = RunModel(model=model) @@ -33,28 +38,21 @@ dist_object = JointIndependent([Normal(0, 1)] * 2) # %% [markdown] -# Compute Sobol indices +# **Compute Sobol indices** -# %% +# %% [markdown] SA = Sobol(runmodel_obj, dist_object) -# Compute Sobol indices using the pick and freeze algorithm -computed_indices = SA.run( - n_samples=100_000, num_bootstrap_samples=1_000, confidence_level=0.95 -) +computed_indices = SA.run(n_samples=50_000) # %% [markdown] -# Expected first order Sobol indices (computed analytically): +# **First order Sobol indices** +# +# Expected first order Sobol indices: # -# X1: 0.0118 +# :math:`\mathrm{S}_1 = \frac{a^2 \cdot \mathbb{V}[X_1]}{a^2 \cdot \mathbb{V}[X_1] + b^2 \cdot \mathbb{V}[X_2]} = \frac{1^2 \cdot 1}{1^2 \cdot 1 + 2^2 \cdot 1} = 0.2` # -# X2: 0.3738 +# :math:`\mathrm{S}_2 = \frac{b^2 \cdot \mathbb{V}[X_2]}{a^2 \cdot \mathbb{V}[X_1] + b^2 \cdot \mathbb{V}[X_2]} = \frac{2^2 \cdot 1}{1^2 \cdot 1 + 2^2 \cdot 1} = 0.8` # %% computed_indices["sobol_i"] - -# %% [markdown] -# Confidence intervals for first order Sobol indices - -# %% -computed_indices["CI_sobol_i"] diff --git a/docs/code/sensitivity/sobol/plot_sobol_func.py b/docs/code/sensitivity/sobol/plot_sobol_func.py index 0f7f7ed0d..d2640955b 100644 --- a/docs/code/sensitivity/sobol/plot_sobol_func.py +++ b/docs/code/sensitivity/sobol/plot_sobol_func.py @@ -3,6 +3,10 @@ Sobol function ============================================== +The Sobol function is non-linear function that is commonly used to benchmark uncertainty +and senstivity analysis methods. Unlike the Ishigami function which has 3 input +variables, the Sobol function can have any number of input variables. + .. 
math:: g(x_1, x_2, \ldots, x_D) := \prod_{i=1}^{D} \frac{|4x_i - 2| + a_i}{1 + a_i}, @@ -23,7 +27,9 @@ from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.sensitivity.sobol import Sobol -# %% +# %% [markdown] +# **Define the model and input distributions** + # Create Model object num_vars = 6 a_vals = np.array([0.0, 0.5, 3.0, 9.0, 99.0, 99.0]) @@ -42,83 +48,89 @@ dist_object = JointIndependent([Uniform(0, 1)] * num_vars) # %% [markdown] -# #### Compute Sobol indices +# **Compute Sobol indices** -# %% +# %% [markdown] SA = Sobol(runmodel_obj, dist_object) # Compute Sobol indices using the pick and freeze algorithm computed_indices = SA.run(n_samples=50_000, estimate_second_order=True) # %% [markdown] -# First order Sobol indices +# **First order Sobol indices** +# +# Expected first order Sobol indices: # -# $S_1$ = 5.86781190e-01 +# :math:`S_1` = 5.86781190e-01 # -# $S_2$ = 2.60791640e-01 +# :math:`S_2` = 2.60791640e-01 # -# $S_3$ = 3.66738244e-02 +# :math:`S_3` = 3.66738244e-02 # -# $S_4$ = 5.86781190e-03 +# :math:`S_4` = 5.86781190e-03 # -# $S_5$ = 5.86781190e-05 +# :math:`S_5` = 5.86781190e-05 # -# $S_6$ = 5.86781190e-05 +# :math:`S_6` = 5.86781190e-05 # %% computed_indices["sobol_i"] # %% [markdown] -# Total order Sobol indices +# **Total order Sobol indices** # -# $S_{T_1}$ = 6.90085892e-01 +# Expected total order Sobol indices: # -# $S_{T_2}$ = 3.56173364e-01 +# :math:`S_{T_1}` = 6.90085892e-01 # -# $S_{T_3}$ = 5.63335422e-02 +# :math:`S_{T_2}` = 3.56173364e-01 # -# $S_{T_4}$ = 9.17057664e-03 +# :math:`S_{T_3}` = 5.63335422e-02 # -# $S_{T_5}$ = 9.20083854e-05 +# :math:`S_{T_4}` = 9.17057664e-03 # -# $S_{T_6}$ = 9.20083854e-05 +# :math:`S_{T_5}` = 9.20083854e-05 +# +# :math:`S_{T_6}` = 9.20083854e-05 # # %% computed_indices["sobol_total_i"] # %% [markdown] -# Second-order Sobol indices +# **Second order Sobol indices** +# +# Expected second order Sobol indices: # -# $S_{12}$ = 0.0869305 +# :math:`S_{T_{12}}` = 0.0869305 # -# $S_{13}$ = 0.0122246 +# :math:`S_{T_{13}}` = 0.0122246 # -# $S_{14}$ = 0.00195594 +# :math:`S_{T_{14}}` = 0.00195594 # -# $S_{15}$ = 0.00001956 +# :math:`S_{T_{15}}` = 0.00001956 # -# $S_{16}$ = 0.00001956 +# :math:`S_{T_{16}}` = 0.00001956 # -# $S_{23}$ = 0.00543316 +# :math:`S_{T_{23}}` = 0.00543316 # -# $S_{24}$ = 0.00086931 +# :math:`S_{T_{24}}` = 0.00086931 # -# $S_{25}$ = 0.00000869 +# :math:`S_{T_{25}}` = 0.00000869 # -# $S_{26}$ = 0.00000869 +# :math:`S_{T_{26}}` = 0.00000869 # -# $S_{34}$ = 0.00012225 +# :math:`S_{T_{34}}` = 0.00012225 # -# $S_{35}$ = 0.00000122 +# :math:`S_{T_{35}}` = 0.00000122 # -# $S_{36}$ = 0.00000122 +# :math:`S_{T_{36}}` = 0.00000122 # -# $S_{45}$ = 0.00000020 +# :math:`S_{T_{45}}` = 0.00000020 # -# $S_{46}$ = 0.00000020 +# :math:`S_{T_{46}}` = 0.00000020 # -# $S_{56}$ = 2.0e-9 +# :math:`S_{T_{56}}` = 2.0e-9 # %% computed_indices["sobol_ij"] diff --git a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py index f067aa4b3..d04649811 100644 --- a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py +++ b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py @@ -3,8 +3,8 @@ Ishigami function ============================================== -The ishigami function is a non-linear, non-monotonic function that is commonly used in -uncertainty and senstivity analysis methods. +The ishigami function is a non-linear, non-monotonic function that is commonly used to +benchmark uncertainty and senstivity analysis methods. .. 
math:: f(x_1, x_2, x_3) = sin(x_1) + a \cdot sin^2(x_2) + b \cdot x_3^4 sin(x_1) @@ -31,7 +31,7 @@ S_{T_1} = \frac{V_{T1}}{\mathbb{V}[Y]}, \quad S_{T_2} = \frac{V_{T2}}{\mathbb{V}[Y]}, \quad S_{T_3} = \frac{V_{T3}}{\mathbb{V}[Y]} .. math:: - V_{T1} = 0.5 (1 + \frac{b\pi^4}{5})^2 + \frac{8b^2\pi^8}{225}, \quad V_{T2}= \frac{a^2}{8}, \quad V_{T3} = \frac{8b^2\pi^8}{225} + V_{T_1} = 0.5 (1 + \frac{b\pi^4}{5})^2 + \frac{8b^2\pi^8}{225}, \quad V_{T_2}= \frac{a^2}{8}, \quad V_{T_3} = \frac{8b^2\pi^8}{225} .. math:: \mathbb{V}[Y] = \frac{a^2}{8} + \frac{b\pi^4}{5} + \frac{b^2\pi^8}{18} + \frac{1}{2} @@ -74,15 +74,15 @@ computed_indices = SA.run(n_samples=100_000, num_bootstrap_samples=100) # %% [markdown] -# **Sobol indices** +# **First order Sobol indices** # # Expected first order Sobol indices: # -# X1: 0.3139 +# :math:`S_1` = 0.3139 # -# X2: 0.4424 +# :math:`S_2` = 0.4424 # -# X3: 0.0 +# :math:`S_3` = 0.0 # %% computed_indices["sobol_i"] @@ -92,23 +92,23 @@ # # Expected total order Sobol indices: # -# X1: 0.55758886 +# :math:`S_{T_1}` = 0.55758886 # -# X2: 0.44241114 +# :math:`S_{T_2}` = 0.44241114 # -# X3: 0.24368366 +# :math:`S_{T_3}` = 0.24368366 # %% computed_indices["sobol_total_i"] # %% [markdown] -# Confidence intervals for first order Sobol indices +# **Confidence intervals for first order Sobol indices** # %% computed_indices["CI_sobol_i"] # %% [markdown] -# Confidence intervals for total order Sobol indices +# **Confidence intervals for total order Sobol indices** # %% computed_indices["CI_sobol_total_i"] diff --git a/docs/source/sensitivity/sobol.rst b/docs/source/sensitivity/sobol.rst index fa4d49265..45f20b612 100644 --- a/docs/source/sensitivity/sobol.rst +++ b/docs/source/sensitivity/sobol.rst @@ -2,22 +2,22 @@ Sobol indices ---------------------------------------- -Sobol indices are the standard approach to calculate a global variance based sensitivity analysis. +Sobol indices are the standard approach for performing a global sensitivity analysis. The indices are based on a variance decomposition of the model output. Using this decomposition allows us to assign the contribution of uncertain inputs to the variance of the model output. There are three main groups of indices: -- First order indices (:math:`S_{i}`): Describe the fraction of the output variance due to a single uncertain input parameter. This amount of variance can be reduced if the uncertainty in the corresponding input is eliminated. +- First order indices (:math:`S_{i}`): Describe the fraction of the output variance due to a single uncertain input parameter :math:`i`. This amount of variance can be reduced if the uncertainty in the corresponding input is eliminated. - Higher order indices: Describe the fraction of the output variance due to interactions between uncertain input parameters. For example, the second order indices (:math:`S_{ij}`) describe the fraction of the output variance due to interactions between two uncertain input parameters :math:`i` and :math:`j`. -- Total order indices (:math:`S_{T_{i}}`): Describe the fraction of the output variance due to a single input parameter and all higher order effects the input parameter is involved. +- Total order indices (:math:`S_{T_{i}}`): Describe the fraction of the output variance due to a single input parameter :math:`i` and all higher order effects of the input parameter. -If the first order index of an input parameter is equal to the total order index it implies that the parameter is not involved in any interaction effects. 
+If the first order index of an input parameter is equal to the total order index it implies that the parameter does not have any interaction effects.
 
-The Sobol indices are computed using the Pick-and-Freeze approach for single output and multi-output models. Since there are several variants of the Pick-and-Freeze approach, the schemes implemented to compute Sobol indices are listed below:
+The Sobol indices are typically computed using the Pick-and-Freeze approach for single output and multi-output models. Since there are several variants of the Pick-and-Freeze approach, the schemes implemented to compute Sobol indices are listed below:
 
-(where, :math:`N` is the number of Monte Carlo samples and :math:`m` being the number of input parameters in the model)
+Here, :math:`N` is the number of Monte Carlo samples and :math:`m` is the number of input parameters in the model.
 
 1. **First order indices** (:math:`S_{i}`)
 
@@ -29,7 +29,7 @@ The Sobol indices are computed using the Pick-and-Freeze approach for single out
 
 .. math::
    y_{A}=f(A), \quad y_{C_{i}}=f(C_{i}), \quad f_{0}^{2}=\left(\frac{1}{2N} \sum_{j=1}^{N} y_{A}^{(j)} + y_{C_{i}}^{(j)} \right)^{2}
 
-Compared to "Sobol1993", the "Janon2014" estimator makes more efficient use of model evaluations and produces better smaller confidence intervals.
+Compared to "Sobol1993", the "Janon2014" estimator makes more efficient use of model evaluations and produces smaller (better) confidence intervals.
 
 - Sobol1993: Requires :math:`N(m + 1)` model evaluations [1]_.
 

From 6cf455cd7a2178f2dcdd3692fb2b039967fa7c18 Mon Sep 17 00:00:00 2001
From: Dimitris Tsapetis
Date: Tue, 17 May 2022 11:47:15 -0400
Subject: [PATCH 31/88] Fixes moment estimation error

---
 README.rst                              |   2 +-
 docs/requirements.txt                   |   2 +-
 .../PolynomialChaosExpansion.py         | 167 +++++++++---------
 tests/unit_tests/surrogates/test_pce.py |  94 ++++++++++
 4 files changed, 183 insertions(+), 82 deletions(-)

diff --git a/README.rst b/README.rst
index e888bb13c..6ff98434d 100644
--- a/README.rst
+++ b/README.rst
@@ -61,7 +61,7 @@ Dependencies
 
 * ::
 
-    Python >= 3.6
+    Python >= 3.9
     Git >= 2.13.1
 
 License
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 1e36149af..fda2af357 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,4 +1,4 @@
-test-uqpy
+UQpy
 sphinx_autodoc_typehints
 sphinx_rtd_theme
 sphinx_gallery
diff --git a/src/UQpy/surrogates/polynomial_chaos/PolynomialChaosExpansion.py b/src/UQpy/surrogates/polynomial_chaos/PolynomialChaosExpansion.py
index 5e078d298..88602ba75 100644
--- a/src/UQpy/surrogates/polynomial_chaos/PolynomialChaosExpansion.py
+++ b/src/UQpy/surrogates/polynomial_chaos/PolynomialChaosExpansion.py
@@ -8,7 +8,8 @@
 from UQpy.surrogates.polynomial_chaos.regressions.baseclass.Regression import Regression
 from UQpy.surrogates.polynomial_chaos.polynomials.TotalDegreeBasis import PolynomialBasis
 from UQpy.distributions import Uniform, Normal
-from UQpy.surrogates.polynomial_chaos.polynomials import Legendre,Hermite
+from UQpy.surrogates.polynomial_chaos.polynomials import Legendre, Hermite
+
 
 class PolynomialChaosExpansion(Surrogate):
 
@@ -97,29 +98,29 @@ def leaveoneout_error(self):
         :return: Cross validation error of experimental design. 
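The "Janon2014" scheme discussed above has a compact form that makes the role of the shared estimate f_0 explicit. A minimal sketch, not the library's implementation, assuming y_A and y_C_i are 1-D NumPy arrays holding the N model evaluations of the Pick-and-Freeze sets A and C_i:

import numpy as np

def janon_first_order(y_A, y_C_i):
    # Shared squared-mean estimate f_0^2, as in the formula above
    f0_sq = (0.5 * np.mean(y_A + y_C_i)) ** 2
    numerator = np.mean(y_A * y_C_i) - f0_sq                 # estimates V[E[Y | X_i]]
    denominator = 0.5 * np.mean(y_A**2 + y_C_i**2) - f0_sq   # estimates V[Y]
    return numerator / denominator

Reusing both sample sets in the mean and variance estimates is what gives this estimator its tighter confidence intervals compared to "Sobol1993".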
""" - - x=self.experimental_design_input - y=self.experimental_design_output - + + x = self.experimental_design_input + y = self.experimental_design_output + if y.ndim == 1 or y.shape[1] == 1: y = y.reshape(-1, 1) - + n_samples = x.shape[0] mu_yval = (1 / n_samples) * np.sum(y, axis=0) - y_val = self.predict(x, ) - polynomialbasis= self.design_matrix - + y_val = self.predict(x, ) + polynomialbasis = self.design_matrix + H = np.dot(polynomialbasis, np.linalg.pinv(np.dot(polynomialbasis.T, polynomialbasis))) H *= polynomialbasis - Hdiag = np.sum(H, axis=1).reshape(-1,1) - + Hdiag = np.sum(H, axis=1).reshape(-1, 1) - eps_val=((n_samples - 1) / n_samples *np.sum(((y - y_val)/(1 - Hdiag))**2,axis=0)) / (np.sum((y - mu_yval) ** 2, axis=0)) + eps_val = ((n_samples - 1) / n_samples * np.sum(((y - y_val) / (1 - Hdiag)) ** 2, axis=0)) / ( + np.sum((y - mu_yval) ** 2, axis=0)) if y.ndim == 1 or y.shape[1] == 1: eps_val = float(eps_val) return np.round(eps_val, 7) - + def validation_error(self, x: np.ndarray, y: np.ndarray): """ Returns the validation error. @@ -180,7 +181,7 @@ def get_moments(self, higher: bool = False): :param higher: True corresponds to calculation of skewness and kurtosis (computationaly expensive for large basis set). :return: Returns the mean and variance. """ - + if self.bias is not None: mean = self.coefficients[0, :] + np.squeeze(self.bias) else: @@ -192,77 +193,83 @@ def get_moments(self, higher: bool = False): variance = float(variance) mean = float(mean) - if higher==False: - return np.round(mean, 4), np.round(variance, 4) - + if not higher: + return mean, variance + else: - multindex=self.multi_index_set - P,inputs_number=multindex.shape - - if inputs_number==1: - marginals=[self.polynomial_basis.distributions] - + multindex = self.multi_index_set + P, inputs_number = multindex.shape + + if inputs_number == 1: + marginals = [self.polynomial_basis.distributions] + else: - marginals=self.polynomial_basis.distributions.marginals - - - skewness=np.zeros(self.outputs_number) - kurtosis=np.zeros(self.outputs_number) - - for ii in range (0,self.outputs_number): - - Beta=self.coefficients[:, ii] - third_moment=0 - fourth_moment=0 - - indices=np.array(np.meshgrid(range(1,P),range(1,P),range(1,P),range(1,P))).T.reshape(-1,4) - i=0 + marginals = self.polynomial_basis.distributions.marginals + + skewness = np.zeros(self.outputs_number) + kurtosis = np.zeros(self.outputs_number) + + for ii in range(0, self.outputs_number): + + Beta = self.coefficients[:, ii] + third_moment = 0 + fourth_moment = 0 + + indices = np.array(np.meshgrid(range(1, P), range(1, P), range(1, P), range(1, P))).T.reshape(-1, 4) + i = 0 for index in indices: - tripleproduct_ND=1 - quadproduct_ND=1 - - - for m in range (0,inputs_number): - - if i<(P-1)**3: - - if type(marginals[m])==Normal: - tripleproduct_1D=Hermite.hermite_triple_product(multindex[index[0],m],multindex[index[1],m],multindex[index[2],m]) - - if type(marginals[m])==Uniform: - tripleproduct_1D=Legendre.legendre_triple_product(multindex[index[0],m],multindex[index[1],m],multindex[index[2],m]) - - tripleproduct_ND=tripleproduct_ND*tripleproduct_1D - + tripleproduct_ND = 1 + quadproduct_ND = 1 + + for m in range(0, inputs_number): + + if i < (P - 1) ** 3: + + if type(marginals[m]) == Normal: + tripleproduct_1D = Hermite.hermite_triple_product(multindex[index[0], m], + multindex[index[1], m], + multindex[index[2], m]) + + if type(marginals[m]) == Uniform: + tripleproduct_1D = Legendre.legendre_triple_product(multindex[index[0], m], + multindex[index[1], 
m], + multindex[index[2], m]) + + tripleproduct_ND = tripleproduct_ND * tripleproduct_1D + else: - tripleproduct_ND=0 - - quadproduct_1D=0 - - for n in range (0,multindex[index[0],m]+multindex[index[1],m]+1): - - if type(marginals[m])==Normal: - tripproduct1=Hermite.hermite_triple_product(multindex[index[0],m],multindex[index[1],m],n) - tripproduct2=Hermite.hermite_triple_product(multindex[index[2],m],multindex[index[3],m],n) - - if type(marginals[m])==Uniform: - tripproduct1=Legendre.legendre_triple_product(multindex[index[0],m],multindex[index[1],m],n) - tripproduct2=Legendre.legendre_triple_product(multindex[index[2],m],multindex[index[3],m],n) - - quadproduct_1D=quadproduct_1D+tripproduct1*tripproduct2 - - quadproduct_ND=quadproduct_ND*quadproduct_1D - - third_moment+=tripleproduct_ND*Beta[index[0]]*Beta[index[1]]*Beta[index[2]] - fourth_moment+=quadproduct_ND*Beta[index[0]]*Beta[index[1]]*Beta[index[2]]*Beta[index[3]] - - i+=1 - - skewness[ii]=1/(np.sqrt(variance)**3)*third_moment - kurtosis[ii]=1/(variance**2)*fourth_moment - + tripleproduct_ND = 0 + + quadproduct_1D = 0 + + for n in range(0, multindex[index[0], m] + multindex[index[1], m] + 1): + + if type(marginals[m]) == Normal: + tripproduct1 = Hermite.hermite_triple_product(multindex[index[0], m], + multindex[index[1], m], n) + tripproduct2 = Hermite.hermite_triple_product(multindex[index[2], m], + multindex[index[3], m], n) + + if type(marginals[m]) == Uniform: + tripproduct1 = Legendre.legendre_triple_product(multindex[index[0], m], + multindex[index[1], m], n) + tripproduct2 = Legendre.legendre_triple_product(multindex[index[2], m], + multindex[index[3], m], n) + + quadproduct_1D = quadproduct_1D + tripproduct1 * tripproduct2 + + quadproduct_ND = quadproduct_ND * quadproduct_1D + + third_moment += tripleproduct_ND * Beta[index[0]] * Beta[index[1]] * Beta[index[2]] + fourth_moment += quadproduct_ND * Beta[index[0]] * Beta[index[1]] * Beta[index[2]] * Beta[index[3]] + + i += 1 + + skewness[ii] = 1 / (np.sqrt(variance) ** 3) * third_moment + kurtosis[ii] = 1 / (variance ** 2) * fourth_moment + if self.coefficients.ndim == 1 or self.coefficients.shape[1] == 1: skewness = float(skewness[0]) kurtosis = float(kurtosis[0]) - return mean,variance,skewness,kurtosis + return mean, variance, skewness, kurtosis diff --git a/tests/unit_tests/surrogates/test_pce.py b/tests/unit_tests/surrogates/test_pce.py index b4d5c9872..2a0b95199 100644 --- a/tests/unit_tests/surrogates/test_pce.py +++ b/tests/unit_tests/surrogates/test_pce.py @@ -222,6 +222,100 @@ def test_17(): assert round(generalized_first_sobol[0], 4) == 0.0137 +def test_lotka_volterra_generalized_sobol(): + import numpy as np + import math + from scipy import integrate + from UQpy.distributions import Uniform, JointIndependent + from UQpy.surrogates.polynomial_chaos import TotalDegreeBasis, \ + LeastSquareRegression, \ + PolynomialChaosExpansion + from UQpy.sensitivity.PceSensitivity import PceSensitivity + + ### function to be approximated + def LV(a, b, c, d, t): + + # X_f0 = np.array([ 0. , 0.]) + X_f1 = np.array([c / (d * b), a / b]) + + def dX_dt(X, t=0): + """ Return the growth rate of fox and rabbit populations. 
""" + return np.array([a * X[0] - b * X[0] * X[1], + -c * X[1] + d * b * X[0] * X[1]]) + + X0 = np.array([10, 5]) # initials conditions: 10 rabbits and 5 foxes + + X, infodict = integrate.odeint(dX_dt, X0, t, full_output=True) + + return X, X_f1 + + # set random seed for reproducibility + np.random.seed(1) + + ### simulation parameters + n = 512 + t = np.linspace(0, 25, n) + + ### Probability distributions of input parameters + pdf1 = Uniform(loc=0.9, scale=0.1) # a + pdf2 = Uniform(loc=0.1, scale=0.05) # b + # pdf2 = Uniform(loc=8, scale=10) # c + # pdf2 = Uniform(loc=8, scale=10) # d + c = 1.5 + d = 0.75 + margs = [pdf1, pdf2] + joint = JointIndependent(marginals=margs) + + print('Total degree: ', max_degree) + polynomial_basis = TotalDegreeBasis(joint, max_degree) + + print('Size of basis:', polynomial_basis.polynomials_number) + # training data + sampling_coeff = 5 + print('Sampling coefficient: ', sampling_coeff) + np.random.seed(42) + n_samples = math.ceil(sampling_coeff * polynomial_basis.polynomials_number) + print('Training data: ', n_samples) + x_train = joint.rvs(n_samples) + y_train = [] + for i in range(x_train.shape[0]): + out, X_f1 = LV(x_train[i, 0], x_train[i, 1], c, d, t) + y_train.append(out.flatten()) + print('Training sample size:', n_samples) + + # fit model + least_squares = LeastSquareRegression() + pce_metamodel = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, + regression_method=least_squares) + pce_metamodel.fit(x_train, y_train) + + # approximation errors + np.random.seed(43) + n_samples_test = 5000 + x_test = joint.rvs(n_samples_test) + y_test = [] + for i in range(x_test.shape[0]): + out, X_f1 = LV(x_test[i, 0], x_test[i, 1], c, d, t) + y_test.append(out.flatten()) + print('Test sample size:', n_samples_test) + + y_test_pce = pce_metamodel.predict(x_test) + errors = np.abs(y_test_pce - y_test) + l2_rel_err = np.linalg.norm(errors, axis=1) / np.linalg.norm(y_test, axis=1) + + l2_rel_err_mean = np.mean(l2_rel_err) + print('Mean L2 relative error:', l2_rel_err_mean) + + # Sobol sensitivity analysis + pce_sa = PceSensitivity(pce_metamodel) + GS1 = pce_sa.calculate_generalized_first_order_indices() + assert round(GS1[0], 4) == 0.2148 + assert round(GS1[1], 4) == 0.7426 + GST = pce_sa.calculate_generalized_total_order_indices() + assert round(GST[0], 4) == 0.2574 + assert round(GST[1], 4) == 0.7852 + + def test_18(): """ Test Sobol indices for vector-valued quantity of interest on the random inputs From 6b41d55cff2291322bb1f7d9f166435a4fe9879c Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Wed, 18 May 2022 16:33:42 -0400 Subject: [PATCH 32/88] Fix for missing beartype package --- meta.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/meta.yaml b/meta.yaml index fe9d984a9..a7fabc456 100644 --- a/meta.yaml +++ b/meta.yaml @@ -12,6 +12,7 @@ requirements: - pip run: - python + - beartype 0.9.1 about: home: https://github.com/SURGroup/UQpy From 12a5b8252676f4bd3ec1bd3641a63a4b358a1d58 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 23 May 2022 12:55:17 +0200 Subject: [PATCH 33/88] Added NumpyIntArray to support int arrays as input --- src/UQpy/utilities/ValidationTypes.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/UQpy/utilities/ValidationTypes.py b/src/UQpy/utilities/ValidationTypes.py index d73abf94e..c52c4f095 100644 --- a/src/UQpy/utilities/ValidationTypes.py +++ b/src/UQpy/utilities/ValidationTypes.py @@ -14,9 +14,15 @@ np.ndarray, Is[lambda array: np.issubdtype(array.dtype, 
float)], ] +NumpyIntArray = Annotated[ + np.ndarray, + Is[lambda array: np.issubdtype(array.dtype, int)], +] Numpy2DFloatArrayOrthonormal = Annotated[ np.ndarray, - Is[lambda array: array.ndim == 2 and np.issubdtype(array.dtype, float) and - np.allclose(array.T @ array, np.eye(array.shape[1]))], + Is[ + lambda array: array.ndim == 2 + and np.issubdtype(array.dtype, float) + and np.allclose(array.T @ array, np.eye(array.shape[1])) + ], ] - From df66e93ce4abf2cd10189f39ac0ed9d554312a11 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 23 May 2022 12:57:27 +0200 Subject: [PATCH 34/88] Added Type Hints to Chatterjee module --- src/UQpy/sensitivity/chatterjee.py | 47 ++++++++++++++++++++++++------ 1 file changed, 38 insertions(+), 9 deletions(-) diff --git a/src/UQpy/sensitivity/chatterjee.py b/src/UQpy/sensitivity/chatterjee.py index 9bea38cdb..cdbd41b32 100644 --- a/src/UQpy/sensitivity/chatterjee.py +++ b/src/UQpy/sensitivity/chatterjee.py @@ -22,9 +22,19 @@ import numpy as np import scipy.stats +from beartype import beartype +from typing import Union +from numbers import Integral from UQpy.sensitivity.baseclass.sensitivity import Sensitivity from UQpy.sensitivity.sobol import compute_first_order as compute_first_order_sobol +from UQpy.utilities.ValidationTypes import ( + RandomStateType, + PositiveInteger, + PositiveFloat, + NumpyFloatArray, + NumpyIntArray, +) from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter @@ -80,12 +90,13 @@ def __init__(self, runmodel_object, dist_object, random_state=None, **kwargs): self.n_samples = None "Number of samples used to estimate the sensitivity indices, :class:`int`" + @beartype def run( self, - n_samples=1_000, - estimate_sobol_indices=False, - num_bootstrap_samples=None, - confidence_level=0.95, + n_samples: PositiveInteger = 1_000, + estimate_sobol_indices: bool = False, + num_bootstrap_samples: PositiveInteger = None, + confidence_level: PositiveFloat = 0.95, ): """ Compute the sensitivity indices using the Chatterjee method. @@ -191,7 +202,12 @@ def run( return computed_indices @staticmethod - def compute_chatterjee_indices(X, Y, seed=None): + @beartype + def compute_chatterjee_indices( + X: Union[NumpyFloatArray, NumpyIntArray], + Y: Union[NumpyFloatArray, NumpyIntArray], + seed: RandomStateType = None, + ): r""" Compute the Chatterjee sensitivity indices @@ -259,7 +275,10 @@ def compute_chatterjee_indices(X, Y, seed=None): return chatterjee_indices @staticmethod - def rank_analog_to_pickfreeze(X, j): + @beartype + def rank_analog_to_pickfreeze( + X: Union[NumpyFloatArray, NumpyIntArray], j: Integral + ): r""" Computing the :math:`N(j)` for each :math:`j \in \{1, \ldots, n\}` as in eq.(8) in [6]_, where :math:`n` is the size of :math:`X`. @@ -304,7 +323,8 @@ def rank_analog_to_pickfreeze(X, j): return np.where(rank_X == 0)[0][0] @staticmethod - def rank_analog_to_pickfreeze_vec(X): + @beartype + def rank_analog_to_pickfreeze_vec(X: Union[NumpyFloatArray, NumpyIntArray]): r""" Computing the :math:`N(j)` for each :math:`j \in \{1, \ldots, n\}` in a vectorized manner., where :math:`n` is the size of :math:`X`. @@ -372,7 +392,11 @@ def rank_analog_to_pickfreeze_vec(X): return N_func.astype(int) @staticmethod - def compute_Sobol_indices(A_model_evals, C_i_model_evals): + @beartype + def compute_Sobol_indices( + A_model_evals: Union[NumpyFloatArray, NumpyIntArray], + C_i_model_evals: Union[NumpyFloatArray, NumpyIntArray], + ): r""" A method to estimate the first order Sobol indices using the Chatterjee method. 
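For orientation alongside these rank-based helpers: in the no-ties case, the Chatterjee coefficient that this module estimates reduces to a three-line computation. A simplified sketch (not the module's vectorized implementation), with X and Y 1-D arrays of equal length n:

import numpy as np
from scipy.stats import rankdata

def chatterjee_xi(X, Y):
    ranks = rankdata(Y[np.argsort(X)])  # ranks r_i of Y after sorting the pairs by X
    n = len(Y)
    return 1.0 - 3.0 * np.sum(np.abs(np.diff(ranks))) / (n**2 - 1)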
@@ -410,7 +434,12 @@ def compute_Sobol_indices(A_model_evals, C_i_model_evals): return first_order_sobol - def compute_rank_analog_of_f_C_i(self, A_samples, A_model_evals): + @beartype + def compute_rank_analog_of_f_C_i( + self, + A_samples: Union[NumpyFloatArray, NumpyIntArray], + A_model_evals: Union[NumpyFloatArray, NumpyIntArray], + ): r""" In the Pick and Freeze method, we use model evaluations :math:`f_A`, :math:`f_B`, :math:`f_{C_{i}}` From 358440614de4d3fb4b42de178b8c5965ef89dd0e Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 23 May 2022 12:57:37 +0200 Subject: [PATCH 35/88] Added Type Hints to CVM module --- src/UQpy/sensitivity/cramer_von_mises.py | 41 +++++++++++++++++------- 1 file changed, 29 insertions(+), 12 deletions(-) diff --git a/src/UQpy/sensitivity/cramer_von_mises.py b/src/UQpy/sensitivity/cramer_von_mises.py index 66421b440..b4e2bebd6 100644 --- a/src/UQpy/sensitivity/cramer_von_mises.py +++ b/src/UQpy/sensitivity/cramer_von_mises.py @@ -15,14 +15,23 @@ """ import logging +from typing import Union import numpy as np +from beartype import beartype from UQpy.sensitivity.baseclass.sensitivity import Sensitivity from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples from UQpy.sensitivity.sobol import compute_first_order as compute_first_order_sobol from UQpy.sensitivity.sobol import compute_total_order as compute_total_order_sobol from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter +from UQpy.utilities.ValidationTypes import ( + PositiveInteger, + PositiveFloat, + NumpyFloatArray, + NumpyIntArray, +) + # TODO: Sampling strategies @@ -85,13 +94,14 @@ def __init__( self.num_vars = None "Number of input random variables, :class:`int`" + @beartype def run( self, - n_samples=1_000, - estimate_sobol_indices=False, - num_bootstrap_samples=None, - confidence_level=0.95, - disable_CVM_indices=False, + n_samples: PositiveInteger = 1_000, + estimate_sobol_indices: bool = False, + num_bootstrap_samples: PositiveInteger = None, + confidence_level: PositiveFloat = 0.95, + disable_CVM_indices: bool = False, ): """ @@ -243,7 +253,8 @@ def run( return computed_indices @staticmethod - def indicator_function(Y, W): + @beartype + def indicator_function(Y: Union[NumpyFloatArray, NumpyIntArray], w: float): """ Vectorized version of the indicator function. @@ -253,22 +264,28 @@ def indicator_function(Y, W): **Inputs:** * **Y** (`ndarray`): - Vector of values of the random variable. + Array of values of the random variable. Shape: `(N, 1)` - * **W** (`ndarray`): - Vector of values of the random variable. - Shape: `(N, 1)` + * **w** (`float`): + Value to compare with the array. **Outputs:** * **indicator** (`ndarray`): + Array of integers with truth values. 
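A small concrete illustration of the vectorized indicator described in this docstring, using hypothetical values:

import numpy as np

Y = np.array([[0.2], [0.7], [1.5]])  # model evaluations, shape (N, 1)
w = 0.7
print((Y <= w).astype(int))          # [[1], [1], [0]] after broadcasting the scalar w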
Shape: `(N, 1)`
 
         """
 
-        return (Y <= W.T).astype(int)
+        return (Y <= w).astype(int)
 
-    def pick_and_freeze_estimator(self, A_model_evals, W_model_evals, C_i_model_evals):
+    @beartype
+    def pick_and_freeze_estimator(
+        self,
+        A_model_evals: Union[NumpyFloatArray, NumpyIntArray],
+        W_model_evals: Union[NumpyFloatArray, NumpyIntArray],
+        C_i_model_evals: Union[NumpyFloatArray, NumpyIntArray],
+    ):
         """
         Compute the first order Cramér-von Mises indices

From 9fcda77409c41079c95ddf697253c716d420a0c Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 23 May 2022 12:57:56 +0200
Subject: [PATCH 36/88] Added Type Hints to GSI module

---
 src/UQpy/sensitivity/generalised_sobol.py | 26 ++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)

diff --git a/src/UQpy/sensitivity/generalised_sobol.py b/src/UQpy/sensitivity/generalised_sobol.py
index e5cf2f654..5311941b6 100644
--- a/src/UQpy/sensitivity/generalised_sobol.py
+++ b/src/UQpy/sensitivity/generalised_sobol.py
@@ -26,9 +26,18 @@
 
 import numpy as np
 
+from typing import Union
+from beartype import beartype
+
 from UQpy.sensitivity.baseclass.sensitivity import Sensitivity
 from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples
 from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter
+from UQpy.utilities.ValidationTypes import (
+    PositiveFloat,
+    PositiveInteger,
+    NumpyFloatArray,
+    NumpyIntArray,
+)
 
 
 class GeneralisedSobol(Sensitivity):
@@ -80,11 +89,12 @@ def __init__(
         self.num_vars = None
         "Number of model input variables, :class:`int`"
 
+    @beartype
     def run(
         self,
-        n_samples=1_000,
-        num_bootstrap_samples=None,
-        confidence_level=0.95,
+        n_samples: PositiveInteger = 1_000,
+        num_bootstrap_samples: PositiveInteger = None,
+        confidence_level: PositiveFloat = 0.95,
     ):
 
         """

From 13d229e128489f550f5baa9ede391dd60256aa83 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 23 May 2022 12:58:29 +0200
Subject: [PATCH 37/88] Added Type Hints to Sobol module

---
 src/UQpy/sensitivity/sobol.py | 56 +++++++++++++++++++++--------------
 1 file changed, 34 insertions(+), 22 deletions(-)

diff --git a/src/UQpy/sensitivity/sobol.py b/src/UQpy/sensitivity/sobol.py
index 0eb327aaf..3566a6fa4 100644
--- a/src/UQpy/sensitivity/sobol.py
+++ b/src/UQpy/sensitivity/sobol.py
@@ -50,12 +50,20 @@
 import math
 import logging
 import itertools
+from typing import Union
 
 import numpy as np
+from beartype import beartype
 
 from UQpy.sensitivity.baseclass.sensitivity import Sensitivity
 from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples
 from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter
+from UQpy.utilities.ValidationTypes import (
+    PositiveInteger,
+    PositiveFloat,
+    NumpyFloatArray,
+    NumpyIntArray,
+)
 
 # TODO: Sampling strategies
 
@@ -128,15 
+136,16 @@ def __init__( self.multioutput = None "True if the model has multiple outputs, :class:`bool`" + @beartype def run( self, - n_samples=1_000, - num_bootstrap_samples=None, - confidence_level=0.95, - estimate_second_order=False, - first_order_scheme="Janon2014", - total_order_scheme="Homma1996", - second_order_scheme="Saltelli2002", + n_samples: PositiveInteger = 1_000, + num_bootstrap_samples: PositiveInteger = None, + confidence_level: PositiveFloat = 0.95, + estimate_second_order: bool = False, + first_order_scheme: str = "Janon2014", + total_order_scheme: str = "Homma1996", + second_order_scheme: str = "Saltelli2002", ): """ @@ -500,12 +509,13 @@ def run( """ +@beartype def compute_first_order( - A_model_evals, - B_model_evals, - C_i_model_evals, - D_i_model_evals=None, - scheme="Janon2014", + A_model_evals: Union[NumpyFloatArray, NumpyIntArray], + B_model_evals: Union[NumpyFloatArray, NumpyIntArray, None], + C_i_model_evals: NumpyFloatArray, + D_i_model_evals: Union[NumpyFloatArray, NumpyIntArray, None] = None, + scheme: str = "Janon2014", ): """ @@ -655,12 +665,13 @@ def compute_first_order( return first_order_sobol +@beartype def compute_total_order( - A_model_evals, - B_model_evals, - C_i_model_evals, - D_i_model_evals=None, - scheme="Homma1996", + A_model_evals: Union[NumpyFloatArray, NumpyIntArray], + B_model_evals: Union[NumpyFloatArray, NumpyIntArray, None], + C_i_model_evals: Union[NumpyFloatArray, NumpyIntArray], + D_i_model_evals: Union[NumpyFloatArray, NumpyIntArray, None] = None, + scheme: str = "Homma1996", ): """ @@ -759,13 +770,14 @@ def compute_total_order( return total_order_sobol +@beartype def compute_second_order( - A_model_evals, - B_model_evals, - C_i_model_evals, - D_i_model_evals, + A_model_evals: Union[NumpyFloatArray, NumpyIntArray], + B_model_evals: Union[NumpyFloatArray, NumpyIntArray], + C_i_model_evals: Union[NumpyFloatArray, NumpyIntArray], + D_i_model_evals: Union[NumpyFloatArray, NumpyIntArray], first_order_sobol=None, # None to make it a make keyword argument - scheme="Saltelli2002", + scheme: str = "Saltelli2002", ): """ Compute the second order Sobol indices using the Pick-and-Freeze scheme. From aff7d0965a335b342a367f5eadcd48ca1f5fdcd8 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 23 May 2022 12:59:07 +0200 Subject: [PATCH 38/88] Added Type Hints to baseclass modules --- src/UQpy/sensitivity/baseclass/pickfreeze.py | 27 +++++++- src/UQpy/sensitivity/baseclass/sensitivity.py | 68 ++++++++----------- 2 files changed, 55 insertions(+), 40 deletions(-) diff --git a/src/UQpy/sensitivity/baseclass/pickfreeze.py b/src/UQpy/sensitivity/baseclass/pickfreeze.py index 4e9e2f57e..9806f76b5 100644 --- a/src/UQpy/sensitivity/baseclass/pickfreeze.py +++ b/src/UQpy/sensitivity/baseclass/pickfreeze.py @@ -1,11 +1,36 @@ import copy +from typing import Union +from beartype import beartype -def generate_pick_freeze_samples(dist_obj, n_samples, random_state=None): +from UQpy.distributions.collection import JointIndependent +from UQpy.utilities.ValidationTypes import ( + RandomStateType, + PositiveInteger, +) + + +@beartype +def generate_pick_freeze_samples( + dist_obj: Union[JointIndependent, Union[list, tuple]], + n_samples: PositiveInteger, + random_state: RandomStateType = None, +): """ Generate samples to be used in the Pick-and-Freeze algorithm. + **Inputs**: + + * **dist_obj** (`JointIndependent` or `list` or `tuple`): + A distribution object or a list or tuple of distribution objects. 
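A hypothetical usage sketch of this sampler (the module path is the one used at this point in the series; it is renamed to CamelCase later), illustrating the C_i construction described above:

import numpy as np
from UQpy.distributions import Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples

dist = JointIndependent([Uniform(0, 1)] * 3)
A, B, C_gen, D_gen = generate_pick_freeze_samples(dist, 100, random_state=42)
for i, C_i in enumerate(C_gen):
    # C_i equals B except that column i is frozen from A
    assert np.allclose(C_i[:, i], A[:, i])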
+ + * **n_samples** (`int`): + The number of samples to be generated. + + * **random_state** (`None` or `int` or `numpy.random.RandomState`): + A random seed or a `numpy.random.RandomState` object. + **Outputs:** * **A_samples** (`ndarray`): diff --git a/src/UQpy/sensitivity/baseclass/sensitivity.py b/src/UQpy/sensitivity/baseclass/sensitivity.py index af2adc594..971fc9e7d 100644 --- a/src/UQpy/sensitivity/baseclass/sensitivity.py +++ b/src/UQpy/sensitivity/baseclass/sensitivity.py @@ -13,52 +13,38 @@ import numpy as np import scipy.stats +from typing import Union +from beartype import beartype + +from UQpy.distributions import * +from UQpy.utilities.ValidationTypes import ( + PositiveFloat, + RandomStateType, + PositiveInteger, + NumpyFloatArray, + NumpyIntArray, +) from UQpy.run_model import RunModel -from UQpy.distributions.baseclass import DistributionContinuous1D from UQpy.distributions.collection import JointIndependent class Sensitivity: + @beartype def __init__( - self, runmodel_object, dist_object, random_state=None, **kwargs + self, + runmodel_object: RunModel, + dist_object: Union[JointIndependent, Union[list, tuple]], + random_state: RandomStateType = None, + **kwargs, ) -> None: - # Check RunModel object - if not isinstance(runmodel_object, RunModel): - raise TypeError("UQpy: runmodel_object must be an object of class RunModel") - self.runmodel_object = runmodel_object - - # Check distributions - if isinstance(dist_object, list): - for i in range(len(dist_object)): - if not isinstance(dist_object[i], (DistributionContinuous1D, JointIndependent)): - raise TypeError( - "UQpy: A ``DistributionContinuous1D`` or ``JointInd`` object " - "must be provided." - ) - else: - if not isinstance(dist_object, (DistributionContinuous1D, JointIndependent)): - raise TypeError( - "UQpy: A ``DistributionContinuous1D`` or ``JointInd`` object must be provided." - ) - self.dist_object = dist_object - - # Check random state self.random_state = random_state - if isinstance(self.random_state, int): - self.random_state = np.random.RandomState(self.random_state) - elif not ( - self.random_state is None - or isinstance(self.random_state, np.random.RandomState) - ): - raise TypeError( - "UQpy: random state should be None, an integer or np.random.RandomState object" - ) # wrapper created for convenience to generate model evaluations - def _run_model(self, samples): + @beartype + def _run_model(self, samples: Union[NumpyFloatArray, NumpyIntArray]): """Generate model evaluations for a set of samples. **Inputs**: @@ -83,7 +69,8 @@ def _run_model(self, samples): return model_evals @staticmethod - def bootstrap_sample_generator_1D(samples): + @beartype + def bootstrap_sample_generator_1D(samples: Union[NumpyFloatArray, NumpyIntArray]): """Generate bootstrap samples. Generators are used to avoid copying the entire array. @@ -113,7 +100,8 @@ def bootstrap_sample_generator_1D(samples): yield samples[_indices] @staticmethod - def bootstrap_sample_generator_2D(samples): + @beartype + def bootstrap_sample_generator_2D(samples: Union[NumpyFloatArray, NumpyIntArray]): """Generate bootstrap samples. Generators are used to avoid copying the entire array. @@ -156,7 +144,8 @@ def bootstrap_sample_generator_2D(samples): yield samples[_indices, cols] @staticmethod - def bootstrap_sample_generator_3D(samples): + @beartype + def bootstrap_sample_generator_3D(samples: Union[NumpyFloatArray, NumpyIntArray]): """Generate bootstrap samples. Generators are used to avoid copying the entire array. 
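The column-wise resampling that the 2-D and 3-D generators implement rests on NumPy fancy indexing; a small standalone illustration on a hypothetical array (not library code):

import numpy as np

rng = np.random.RandomState(0)
samples = np.arange(12.0).reshape(4, 3)  # (n_samples, n_outputs)
cols = np.arange(3)
idx = rng.randint(0, 4, size=(4, 3))     # independent row picks per output column
boot = samples[idx, cols]                # boot[i, j] == samples[idx[i, j], j]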
@@ -190,13 +179,14 @@ def bootstrap_sample_generator_3D(samples): yield samples[:, _indices, cols] + @beartype def bootstrapping( self, estimator, estimator_inputs, - qoi_mean, - num_bootstrap_samples, - confidence_level=0.95, + qoi_mean: Union[NumpyFloatArray, NumpyIntArray], + num_bootstrap_samples: PositiveInteger = None, + confidence_level: PositiveFloat = 0.95, **kwargs, ): From 4ceb33c9193e56c3637057fd403022d07202d6a4 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 23 May 2022 13:14:00 +0200 Subject: [PATCH 39/88] Changed Chatterjee module name to CamelCase --- docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py | 2 +- docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py | 2 +- docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py | 2 +- src/UQpy/sensitivity/{chatterjee.py => Chatterjee.py} | 0 src/UQpy/sensitivity/__init__.py | 2 +- tests/unit_tests/sensitivity/test_chatterjee.py | 2 +- 6 files changed, 5 insertions(+), 5 deletions(-) rename src/UQpy/sensitivity/{chatterjee.py => Chatterjee.py} (100%) diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py index 81d752653..8fa879847 100644 --- a/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py +++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py @@ -21,7 +21,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Normal from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.chatterjee import Chatterjee +from UQpy.sensitivity.Chatterjee import Chatterjee # %% [markdown] # **Define the model and input distributions** diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py index d3759fc10..448309e3a 100644 --- a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py +++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py @@ -21,7 +21,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.chatterjee import Chatterjee +from UQpy.sensitivity.Chatterjee import Chatterjee # %% [markdown] # **Define the model and input distributions** diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py index 0169597e1..439ffaa85 100644 --- a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py +++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py @@ -32,7 +32,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.chatterjee import Chatterjee +from UQpy.sensitivity.Chatterjee import Chatterjee # %% [markdown] # **Define the model and input distributions** diff --git a/src/UQpy/sensitivity/chatterjee.py b/src/UQpy/sensitivity/Chatterjee.py similarity index 100% rename from src/UQpy/sensitivity/chatterjee.py rename to src/UQpy/sensitivity/Chatterjee.py diff --git a/src/UQpy/sensitivity/__init__.py b/src/UQpy/sensitivity/__init__.py index e25335a34..01d399fda 100644 --- a/src/UQpy/sensitivity/__init__.py +++ b/src/UQpy/sensitivity/__init__.py @@ -2,7 +2,7 @@ from UQpy.sensitivity.PceSensitivity import 
PceSensitivity from UQpy.sensitivity.sobol import Sobol from UQpy.sensitivity.cramer_von_mises import CramervonMises -from UQpy.sensitivity.chatterjee import Chatterjee +from UQpy.sensitivity.Chatterjee import Chatterjee from UQpy.sensitivity.generalised_sobol import GeneralisedSobol from . import MorrisSensitivity diff --git a/tests/unit_tests/sensitivity/test_chatterjee.py b/tests/unit_tests/sensitivity/test_chatterjee.py index 8a7c6495f..5912cdb2c 100644 --- a/tests/unit_tests/sensitivity/test_chatterjee.py +++ b/tests/unit_tests/sensitivity/test_chatterjee.py @@ -49,7 +49,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform, Normal from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.chatterjee import Chatterjee +from UQpy.sensitivity.Chatterjee import Chatterjee # Prepare From 78ba451e7c00db6b6e1ac6320428230a2f870df1 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 23 May 2022 13:16:52 +0200 Subject: [PATCH 40/88] Changed CVM module name to CamelCase --- docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py | 2 +- docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py | 2 +- src/UQpy/sensitivity/{cramer_von_mises.py => CramervonMises.py} | 0 src/UQpy/sensitivity/__init__.py | 2 +- tests/unit_tests/sensitivity/test_cramer_von_mises.py | 2 +- 5 files changed, 4 insertions(+), 4 deletions(-) rename src/UQpy/sensitivity/{cramer_von_mises.py => CramervonMises.py} (100%) diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py index e6949a71b..28b390a47 100644 --- a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py +++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py @@ -20,7 +20,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Normal from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.cramer_von_mises import CramervonMises as cvm +from UQpy.sensitivity.CramervonMises import CramervonMises as cvm # %% [markdown] # **Define the model and input distributions** diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py index ff86ab30e..7500c7259 100644 --- a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py +++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py @@ -25,7 +25,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.cramer_von_mises import CramervonMises as cvm +from UQpy.sensitivity.CramervonMises import CramervonMises as cvm # %% [markdown] # **Define the model and input distributions** diff --git a/src/UQpy/sensitivity/cramer_von_mises.py b/src/UQpy/sensitivity/CramervonMises.py similarity index 100% rename from src/UQpy/sensitivity/cramer_von_mises.py rename to src/UQpy/sensitivity/CramervonMises.py diff --git a/src/UQpy/sensitivity/__init__.py b/src/UQpy/sensitivity/__init__.py index 01d399fda..15bddfb02 100644 --- a/src/UQpy/sensitivity/__init__.py +++ b/src/UQpy/sensitivity/__init__.py @@ -1,7 +1,7 @@ from UQpy.sensitivity.MorrisSensitivity import MorrisSensitivity from UQpy.sensitivity.PceSensitivity import PceSensitivity from UQpy.sensitivity.sobol import Sobol -from 
UQpy.sensitivity.cramer_von_mises import CramervonMises +from UQpy.sensitivity.CramervonMises import CramervonMises from UQpy.sensitivity.Chatterjee import Chatterjee from UQpy.sensitivity.generalised_sobol import GeneralisedSobol diff --git a/tests/unit_tests/sensitivity/test_cramer_von_mises.py b/tests/unit_tests/sensitivity/test_cramer_von_mises.py index 46cebb429..c94ddbae0 100644 --- a/tests/unit_tests/sensitivity/test_cramer_von_mises.py +++ b/tests/unit_tests/sensitivity/test_cramer_von_mises.py @@ -47,7 +47,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Normal, Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.cramer_von_mises import CramervonMises +from UQpy.sensitivity.CramervonMises import CramervonMises # Prepare ############################################################################### From f9eec9ac7896b0e50999b7f11a469ad51fb5b27a Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 23 May 2022 13:19:28 +0200 Subject: [PATCH 41/88] Changed GSI module name to CamelCase --- .../plot_generalised_sobol_mechanical_oscillator_ODE.py | 2 +- .../generalised_sobol/plot_generalised_sobol_multioutput.py | 2 +- .../sensitivity/{generalised_sobol.py => GeneralisedSobol.py} | 0 src/UQpy/sensitivity/__init__.py | 2 +- tests/unit_tests/sensitivity/test_generalised_sobol.py | 2 +- 5 files changed, 4 insertions(+), 4 deletions(-) rename src/UQpy/sensitivity/{generalised_sobol.py => GeneralisedSobol.py} (100%) diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py index 361bde6aa..716c498f7 100644 --- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py +++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py @@ -29,7 +29,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform, Normal from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.generalised_sobol import GeneralisedSobol +from UQpy.sensitivity.GeneralisedSobol import GeneralisedSobol # %% [markdown] # **Define the model and input distributions** diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py index 1b673ddcf..af4ca6ff3 100644 --- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py +++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py @@ -22,7 +22,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform, Normal from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.generalised_sobol import GeneralisedSobol +from UQpy.sensitivity.GeneralisedSobol import GeneralisedSobol # %% [markdown] # **Define the model and input distributions** diff --git a/src/UQpy/sensitivity/generalised_sobol.py b/src/UQpy/sensitivity/GeneralisedSobol.py similarity index 100% rename from src/UQpy/sensitivity/generalised_sobol.py rename to src/UQpy/sensitivity/GeneralisedSobol.py diff --git a/src/UQpy/sensitivity/__init__.py b/src/UQpy/sensitivity/__init__.py index 15bddfb02..f391bebd2 100644 --- a/src/UQpy/sensitivity/__init__.py +++ 
b/src/UQpy/sensitivity/__init__.py @@ -3,7 +3,7 @@ from UQpy.sensitivity.sobol import Sobol from UQpy.sensitivity.CramervonMises import CramervonMises from UQpy.sensitivity.Chatterjee import Chatterjee -from UQpy.sensitivity.generalised_sobol import GeneralisedSobol +from UQpy.sensitivity.GeneralisedSobol import GeneralisedSobol from . import MorrisSensitivity from . import PceSensitivity diff --git a/tests/unit_tests/sensitivity/test_generalised_sobol.py b/tests/unit_tests/sensitivity/test_generalised_sobol.py index c759d85bb..3b5df3167 100644 --- a/tests/unit_tests/sensitivity/test_generalised_sobol.py +++ b/tests/unit_tests/sensitivity/test_generalised_sobol.py @@ -50,7 +50,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform, Normal from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.generalised_sobol import GeneralisedSobol +from UQpy.sensitivity.GeneralisedSobol import GeneralisedSobol # Prepare ############################################################################### From bfa2c53e0b7252f84d2841c33652618d79e064a0 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 23 May 2022 13:20:51 +0200 Subject: [PATCH 42/88] Changed Sobol module name to CamelCase --- docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py | 2 +- docs/code/sensitivity/sobol/plot_sobol_additive.py | 2 +- docs/code/sensitivity/sobol/plot_sobol_func.py | 2 +- docs/code/sensitivity/sobol/plot_sobol_ishigami.py | 2 +- src/UQpy/sensitivity/Chatterjee.py | 2 +- src/UQpy/sensitivity/CramervonMises.py | 4 ++-- src/UQpy/sensitivity/{sobol.py => Sobol.py} | 0 src/UQpy/sensitivity/__init__.py | 2 +- tests/unit_tests/sensitivity/test_baseclass.py | 2 +- tests/unit_tests/sensitivity/test_sobol.py | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) rename src/UQpy/sensitivity/{sobol.py => Sobol.py} (100%) diff --git a/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py b/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py index 05636321f..06d1a66b1 100644 --- a/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py +++ b/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py @@ -30,7 +30,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.sobol import Sobol +from UQpy.sensitivity.Sobol import Sobol # %% [markdown] # **Define the model and input distributions** diff --git a/docs/code/sensitivity/sobol/plot_sobol_additive.py b/docs/code/sensitivity/sobol/plot_sobol_additive.py index 4645ed749..973e97dd6 100644 --- a/docs/code/sensitivity/sobol/plot_sobol_additive.py +++ b/docs/code/sensitivity/sobol/plot_sobol_additive.py @@ -13,7 +13,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Normal from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.sobol import Sobol +from UQpy.sensitivity.Sobol import Sobol # %% [markdown] # **Define the model and input distributions** diff --git a/docs/code/sensitivity/sobol/plot_sobol_func.py b/docs/code/sensitivity/sobol/plot_sobol_func.py index d2640955b..5a5cb9389 100644 --- a/docs/code/sensitivity/sobol/plot_sobol_func.py +++ b/docs/code/sensitivity/sobol/plot_sobol_func.py @@ -25,7 +25,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions 
import Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.sobol import Sobol +from UQpy.sensitivity.Sobol import Sobol # %% [markdown] # **Define the model and input distributions** diff --git a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py index d04649811..dc118034f 100644 --- a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py +++ b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py @@ -45,7 +45,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.sobol import Sobol +from UQpy.sensitivity.Sobol import Sobol # %% [markdown] # **Define the model and input distributions** diff --git a/src/UQpy/sensitivity/Chatterjee.py b/src/UQpy/sensitivity/Chatterjee.py index cdbd41b32..d694d59db 100644 --- a/src/UQpy/sensitivity/Chatterjee.py +++ b/src/UQpy/sensitivity/Chatterjee.py @@ -27,7 +27,7 @@ from numbers import Integral from UQpy.sensitivity.baseclass.sensitivity import Sensitivity -from UQpy.sensitivity.sobol import compute_first_order as compute_first_order_sobol +from UQpy.sensitivity.Sobol import compute_first_order as compute_first_order_sobol from UQpy.utilities.ValidationTypes import ( RandomStateType, PositiveInteger, diff --git a/src/UQpy/sensitivity/CramervonMises.py b/src/UQpy/sensitivity/CramervonMises.py index b4e2bebd6..cb3ef2ed6 100644 --- a/src/UQpy/sensitivity/CramervonMises.py +++ b/src/UQpy/sensitivity/CramervonMises.py @@ -22,8 +22,8 @@ from UQpy.sensitivity.baseclass.sensitivity import Sensitivity from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples -from UQpy.sensitivity.sobol import compute_first_order as compute_first_order_sobol -from UQpy.sensitivity.sobol import compute_total_order as compute_total_order_sobol +from UQpy.sensitivity.Sobol import compute_first_order as compute_first_order_sobol +from UQpy.sensitivity.Sobol import compute_total_order as compute_total_order_sobol from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter from UQpy.utilities.ValidationTypes import ( PositiveInteger, diff --git a/src/UQpy/sensitivity/sobol.py b/src/UQpy/sensitivity/Sobol.py similarity index 100% rename from src/UQpy/sensitivity/sobol.py rename to src/UQpy/sensitivity/Sobol.py diff --git a/src/UQpy/sensitivity/__init__.py b/src/UQpy/sensitivity/__init__.py index f391bebd2..2433a768b 100644 --- a/src/UQpy/sensitivity/__init__.py +++ b/src/UQpy/sensitivity/__init__.py @@ -1,6 +1,6 @@ from UQpy.sensitivity.MorrisSensitivity import MorrisSensitivity from UQpy.sensitivity.PceSensitivity import PceSensitivity -from UQpy.sensitivity.sobol import Sobol +from UQpy.sensitivity.Sobol import Sobol from UQpy.sensitivity.CramervonMises import CramervonMises from UQpy.sensitivity.Chatterjee import Chatterjee from UQpy.sensitivity.GeneralisedSobol import GeneralisedSobol diff --git a/tests/unit_tests/sensitivity/test_baseclass.py b/tests/unit_tests/sensitivity/test_baseclass.py index 458826d6f..9c1db1810 100644 --- a/tests/unit_tests/sensitivity/test_baseclass.py +++ b/tests/unit_tests/sensitivity/test_baseclass.py @@ -17,7 +17,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.sobol import Sobol +from UQpy.sensitivity.Sobol import 
Sobol from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples # Prepare diff --git a/tests/unit_tests/sensitivity/test_sobol.py b/tests/unit_tests/sensitivity/test_sobol.py index 784eb6143..3c1f11700 100644 --- a/tests/unit_tests/sensitivity/test_sobol.py +++ b/tests/unit_tests/sensitivity/test_sobol.py @@ -55,7 +55,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.sobol import Sobol +from UQpy.sensitivity.Sobol import Sobol # Prepare ############################################################################### From e0ad8da9db9eb20330f53ffc8b3cb4139ebcdc37 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 23 May 2022 13:22:58 +0200 Subject: [PATCH 43/88] Changed baseclass module names to CamelCase --- src/UQpy/sensitivity/Chatterjee.py | 2 +- src/UQpy/sensitivity/CramervonMises.py | 4 ++-- src/UQpy/sensitivity/GeneralisedSobol.py | 4 ++-- src/UQpy/sensitivity/Sobol.py | 4 ++-- .../sensitivity/baseclass/{pickfreeze.py => PickFreeze.py} | 0 .../sensitivity/baseclass/{sensitivity.py => Sensitivity.py} | 0 src/UQpy/sensitivity/baseclass/__init__.py | 4 ++-- tests/unit_tests/sensitivity/test_baseclass.py | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) rename src/UQpy/sensitivity/baseclass/{pickfreeze.py => PickFreeze.py} (100%) rename src/UQpy/sensitivity/baseclass/{sensitivity.py => Sensitivity.py} (100%) diff --git a/src/UQpy/sensitivity/Chatterjee.py b/src/UQpy/sensitivity/Chatterjee.py index d694d59db..1eb13ff19 100644 --- a/src/UQpy/sensitivity/Chatterjee.py +++ b/src/UQpy/sensitivity/Chatterjee.py @@ -26,7 +26,7 @@ from typing import Union from numbers import Integral -from UQpy.sensitivity.baseclass.sensitivity import Sensitivity +from UQpy.sensitivity.baseclass.Sensitivity import Sensitivity from UQpy.sensitivity.Sobol import compute_first_order as compute_first_order_sobol from UQpy.utilities.ValidationTypes import ( RandomStateType, diff --git a/src/UQpy/sensitivity/CramervonMises.py b/src/UQpy/sensitivity/CramervonMises.py index cb3ef2ed6..f6507274a 100644 --- a/src/UQpy/sensitivity/CramervonMises.py +++ b/src/UQpy/sensitivity/CramervonMises.py @@ -20,8 +20,8 @@ import numpy as np from beartype import beartype -from UQpy.sensitivity.baseclass.sensitivity import Sensitivity -from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples +from UQpy.sensitivity.baseclass.Sensitivity import Sensitivity +from UQpy.sensitivity.baseclass.PickFreeze import generate_pick_freeze_samples from UQpy.sensitivity.Sobol import compute_first_order as compute_first_order_sobol from UQpy.sensitivity.Sobol import compute_total_order as compute_total_order_sobol from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter diff --git a/src/UQpy/sensitivity/GeneralisedSobol.py b/src/UQpy/sensitivity/GeneralisedSobol.py index 5311941b6..1b8764ca3 100644 --- a/src/UQpy/sensitivity/GeneralisedSobol.py +++ b/src/UQpy/sensitivity/GeneralisedSobol.py @@ -29,8 +29,8 @@ from typing import Union from beartype import beartype -from UQpy.sensitivity.baseclass.sensitivity import Sensitivity -from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples +from UQpy.sensitivity.baseclass.Sensitivity import Sensitivity +from UQpy.sensitivity.baseclass.PickFreeze import generate_pick_freeze_samples from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter from 
UQpy.utilities.ValidationTypes import ( PositiveFloat, diff --git a/src/UQpy/sensitivity/Sobol.py b/src/UQpy/sensitivity/Sobol.py index 3566a6fa4..99daec702 100644 --- a/src/UQpy/sensitivity/Sobol.py +++ b/src/UQpy/sensitivity/Sobol.py @@ -55,8 +55,8 @@ import numpy as np from beartype import beartype -from UQpy.sensitivity.baseclass.sensitivity import Sensitivity -from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples +from UQpy.sensitivity.baseclass.Sensitivity import Sensitivity +from UQpy.sensitivity.baseclass.PickFreeze import generate_pick_freeze_samples from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter from UQpy.utilities.ValidationTypes import ( PositiveInteger, diff --git a/src/UQpy/sensitivity/baseclass/pickfreeze.py b/src/UQpy/sensitivity/baseclass/PickFreeze.py similarity index 100% rename from src/UQpy/sensitivity/baseclass/pickfreeze.py rename to src/UQpy/sensitivity/baseclass/PickFreeze.py diff --git a/src/UQpy/sensitivity/baseclass/sensitivity.py b/src/UQpy/sensitivity/baseclass/Sensitivity.py similarity index 100% rename from src/UQpy/sensitivity/baseclass/sensitivity.py rename to src/UQpy/sensitivity/baseclass/Sensitivity.py diff --git a/src/UQpy/sensitivity/baseclass/__init__.py b/src/UQpy/sensitivity/baseclass/__init__.py index 7e11a2b63..99b9c2d0a 100644 --- a/src/UQpy/sensitivity/baseclass/__init__.py +++ b/src/UQpy/sensitivity/baseclass/__init__.py @@ -1,2 +1,2 @@ -from UQpy.sensitivity.baseclass.sensitivity import * -from UQpy.sensitivity.baseclass.pickfreeze import * +from UQpy.sensitivity.baseclass.Sensitivity import * +from UQpy.sensitivity.baseclass.PickFreeze import * diff --git a/tests/unit_tests/sensitivity/test_baseclass.py b/tests/unit_tests/sensitivity/test_baseclass.py index 9c1db1810..724abb298 100644 --- a/tests/unit_tests/sensitivity/test_baseclass.py +++ b/tests/unit_tests/sensitivity/test_baseclass.py @@ -18,7 +18,7 @@ from UQpy.distributions import Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.sensitivity.Sobol import Sobol -from UQpy.sensitivity.baseclass.pickfreeze import generate_pick_freeze_samples +from UQpy.sensitivity.baseclass.PickFreeze import generate_pick_freeze_samples # Prepare ############################################################################### From d78b28ba8da7405d111d2715a459c694aa023697 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 23 May 2022 14:55:26 +0200 Subject: [PATCH 44/88] Changed variable name: CI -> confidence interval --- src/UQpy/sensitivity/Chatterjee.py | 10 ++++--- src/UQpy/sensitivity/CramervonMises.py | 10 ++++--- src/UQpy/sensitivity/GeneralisedSobol.py | 16 ++++++---- src/UQpy/sensitivity/Sobol.py | 30 +++++++++++-------- .../sensitivity/test_cramer_von_mises.py | 2 +- .../sensitivity/test_generalised_sobol.py | 6 ++-- tests/unit_tests/sensitivity/test_sobol.py | 10 ++++--- 7 files changed, 51 insertions(+), 33 deletions(-) diff --git a/src/UQpy/sensitivity/Chatterjee.py b/src/UQpy/sensitivity/Chatterjee.py index 1eb13ff19..a171e6247 100644 --- a/src/UQpy/sensitivity/Chatterjee.py +++ b/src/UQpy/sensitivity/Chatterjee.py @@ -81,7 +81,7 @@ def __init__(self, runmodel_object, dist_object, random_state=None, **kwargs): self.sobol_i = None "Sobol indices computed using the rank statistics, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`" - self.CI_chatterjee_i = None + self.confidence_interval_chatterjee_i = None "Confidence intervals for the Chatterjee sensitivity indices, :class:`numpy.ndarray` of 
shape :code:`(num_vars, 2)`"
 
         self.num_vars = None
@@ -115,7 +115,7 @@ def run(
 
         :return: A :class:`dict` with the following keys: \
             :code:`'chatterjee_i'` of shape :code:`(num_vars, 1)`, \
-            :code:`'CI_chatterjee_i'` of shape :code:`(num_vars, 2)`, \
+            :code:`'confidence_interval_chatterjee_i'` of shape :code:`(num_vars, 2)`, \
             :code:`'sobol_i'` of shape :code:`(num_vars, 1)`.
 
         """
@@ -185,7 +185,7 @@ def run(
 
             estimator_inputs = [A_samples, A_model_evals]
 
-            self.CI_chatterjee_i = self.bootstrapping(
+            self.confidence_interval_chatterjee_i = self.bootstrapping(
                 self.compute_chatterjee_indices,
                 estimator_inputs,
                 computed_indices["chatterjee_i"],
@@ -197,7 +197,9 @@ def run(
                 "UQpy: Confidence intervals for Chatterjee indices computed successfully.\n"
             )
 
-            computed_indices["CI_chatterjee_i"] = self.CI_chatterjee_i
+            computed_indices[
+                "confidence_interval_chatterjee_i"
+            ] = self.confidence_interval_chatterjee_i
 
         return computed_indices
 
diff --git a/src/UQpy/sensitivity/CramervonMises.py b/src/UQpy/sensitivity/CramervonMises.py
index f6507274a..745557cd9 100644
--- a/src/UQpy/sensitivity/CramervonMises.py
+++ b/src/UQpy/sensitivity/CramervonMises.py
@@ -79,7 +79,7 @@ def __init__(
         self.CVM_i = None
         "First order Cramér-von Mises indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`"
 
-        self.CI_CVM_i = None
+        self.confidence_interval_CVM_i = None
         "Confidence intervals of the first order Cramér-von Mises indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 2)`"
 
         self.sobol_i = None
@@ -124,7 +124,7 @@ def run(
 
         :return: A :class:`dict` with the following keys: \
             :code:`CVM_i` of shape :code:`(num_vars, 1)`, \
-            :code:`CI_CVM_i` of shape :code:`(num_vars, 2)`, \
+            :code:`confidence_interval_CVM_i` of shape :code:`(num_vars, 2)`, \
             :code:`sobol_i` of shape :code:`(num_vars, 1)`, \
             :code:`sobol_total_i` of shape :code:`(num_vars, 1)`.
 
@@ -205,7 +205,7 @@ def run(
                 C_i_model_evals,
             ]
 
-            self.CI_CVM_i = self.bootstrapping(
+            self.confidence_interval_CVM_i = self.bootstrapping(
                 self.pick_and_freeze_estimator,
                 estimator_inputs,
                 computed_indices["CVM_i"],
@@ -218,7 +218,9 @@ def run(
             )
 
             # Store the indices in the dictionary
-            computed_indices["CI_CVM_i"] = self.CI_CVM_i
+            computed_indices[
+                "confidence_interval_CVM_i"
+            ] = self.confidence_interval_CVM_i
 
         ################## COMPUTE SOBOL INDICES ##################
 
diff --git a/src/UQpy/sensitivity/GeneralisedSobol.py b/src/UQpy/sensitivity/GeneralisedSobol.py
index 1b8764ca3..2a976e004 100644
--- a/src/UQpy/sensitivity/GeneralisedSobol.py
+++ b/src/UQpy/sensitivity/GeneralisedSobol.py
@@ -113,8 +113,8 @@ def run(
         :return: A :class:`dict` with the following keys: \
             :code:`gen_sobol_i` of shape :code:`(num_vars, 1)`, \
             :code:`gen_sobol_total_i` of shape :code:`(num_vars, 1)`, \
-            :code:`CI_gen_sobol_i` of shape :code:`(num_vars, 2)`, \
-            :code:`CI_gen_sobol_total_i` of shape :code:`(num_vars, 2)`.
+            :code:`confidence_interval_gen_sobol_i` of shape :code:`(num_vars, 2)`, \
+            :code:`confidence_interval_gen_sobol_total_i` of shape :code:`(num_vars, 2)`. 
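For context on what the renamed confidence_interval_* attributes hold: a two-sided interval of this kind is commonly formed from bootstrap replicates with a normal approximation. A sketch under that assumption (the library's own bootstrapping routine may differ in details):

import numpy as np
from scipy.stats import norm

def two_sided_ci(point_estimate, bootstrap_estimates, confidence_level=0.95):
    z = norm.ppf(1 - (1 - confidence_level) / 2)  # ~1.96 for a 95% level
    std = np.std(bootstrap_estimates, ddof=1)
    return point_estimate - z * std, point_estimate + z * std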
""" @@ -225,7 +225,7 @@ def run( ] # First order generalised Sobol indices - self.CI_gen_sobol_i = self.bootstrapping( + self.confidence_interval_gen_sobol_i = self.bootstrapping( self.compute_first_order_generalised_sobol_indices, estimator_inputs, computed_indices["gen_sobol_i"], @@ -238,7 +238,7 @@ def run( ) # Total order generalised Sobol indices - self.CI_gen_sobol_total_i = self.bootstrapping( + self.confidence_interval_gen_sobol_total_i = self.bootstrapping( self.compute_total_order_generalised_sobol_indices, estimator_inputs, computed_indices["gen_sobol_total_i"], @@ -251,8 +251,12 @@ def run( ) # Store the indices in the dictionary - computed_indices["CI_gen_sobol_i"] = self.CI_gen_sobol_i - computed_indices["CI_gen_sobol_total_i"] = self.CI_gen_sobol_total_i + computed_indices[ + "confidence_interval_gen_sobol_i" + ] = self.confidence_interval_gen_sobol_i + computed_indices[ + "confidence_interval_gen_sobol_total_i" + ] = self.confidence_interval_gen_sobol_total_i return computed_indices diff --git a/src/UQpy/sensitivity/Sobol.py b/src/UQpy/sensitivity/Sobol.py index 99daec702..84e9b3510 100644 --- a/src/UQpy/sensitivity/Sobol.py +++ b/src/UQpy/sensitivity/Sobol.py @@ -118,13 +118,13 @@ def __init__( self.sobol_ij = None "Second order Sobol indices, :class:`numpy.ndarray` of shape `(num_second_order_terms, n_outputs)`" - self.CI_sobol_i = None + self.confidence_interval_sobol_i = None "Confidence intervals for the first order Sobol indices, :class:`numpy.ndarray` of shape `(num_vars, 2)`" - self.CI_sobol_total_i = None + self.confidence_interval_sobol_total_i = None "Confidence intervals for the total order Sobol indices, :class:`numpy.ndarray` of shape `(num_vars, 2)`" - self.CI_sobol_ij = None + self.confidence_interval_sobol_ij = None "Confidence intervals for the second order Sobol indices, :class:`numpy.ndarray` of shape `(num_second_order_terms, 2)`" self.n_samples = None @@ -176,11 +176,11 @@ def run( :code:`sobol_i` of shape :code:`(num_vars, 1)`, \ :code:`sobol_total_i` of shape :code:`(num_vars, 1)`, \ :code:`sobol_ij` of shape :code:`(num_second_order_terms, 1)`, \ - :code:`CI_sobol_i` of shape :code:`(num_vars, 2)`, \ + :code:`confidence_interval_sobol_i` of shape :code:`(num_vars, 2)`, \ if multioutput: Shape: `(n_outputs, num_vars, 2)`, \ - :code:`CI_sobol_total_i` of shape :code:`(num_vars, 2)`, \ + :code:`confidence_interval_sobol_total_i` of shape :code:`(num_vars, 2)`, \ if multioutput: Shape: `(n_outputs, num_vars, 2)`, \ - :code:`CI_sobol_ij` of shape :code:`(num_second_order_terms, 2)` + :code:`confidence_interval_sobol_ij` of shape :code:`(num_second_order_terms, 2)` if multioutput: Shape: `(n_outputs, num_second_order_terms, 2)`, \ """ @@ -321,7 +321,7 @@ def run( ] # First order Sobol indices - self.CI_sobol_i = self.bootstrapping( + self.confidence_interval_sobol_i = self.bootstrapping( compute_first_order, estimator_inputs, computed_indices["sobol_i"], @@ -334,10 +334,12 @@ def run( "UQpy: Confidence intervals for First order Sobol indices computed successfully." ) - computed_indices["CI_sobol_i"] = self.CI_sobol_i + computed_indices[ + "confidence_interval_sobol_i" + ] = self.confidence_interval_sobol_i # Total order Sobol indices - self.CI_sobol_total_i = self.bootstrapping( + self.confidence_interval_sobol_total_i = self.bootstrapping( compute_total_order, estimator_inputs, computed_indices["sobol_total_i"], @@ -350,11 +352,13 @@ def run( "UQpy: Confidence intervals for Total order Sobol indices computed successfully." 
) - computed_indices["CI_sobol_total_i"] = self.CI_sobol_total_i + computed_indices[ + "confidence_interval_sobol_total_i" + ] = self.confidence_interval_sobol_total_i # Second order Sobol indices if estimate_second_order: - self.CI_sobol_ij = self.bootstrapping( + self.confidence_interval_sobol_ij = self.bootstrapping( compute_second_order, estimator_inputs, computed_indices["sobol_ij"], @@ -368,7 +372,9 @@ def run( "UQpy: Confidence intervals for Second order Sobol indices computed successfully." ) - computed_indices["CI_sobol_ij"] = self.CI_sobol_ij + computed_indices[ + "confidence_interval_sobol_ij" + ] = self.confidence_interval_sobol_ij return computed_indices diff --git a/tests/unit_tests/sensitivity/test_cramer_von_mises.py b/tests/unit_tests/sensitivity/test_cramer_von_mises.py index c94ddbae0..ed9a55b24 100644 --- a/tests/unit_tests/sensitivity/test_cramer_von_mises.py +++ b/tests/unit_tests/sensitivity/test_cramer_von_mises.py @@ -152,7 +152,7 @@ def bootstrap_CVM_index_variance(CVM_object, NUM_SAMPLES): ) First_order = computed_indices["CVM_i"].ravel() - upper_bound_first_order = computed_indices["CI_CVM_i"][:, 1] + upper_bound_first_order = computed_indices["confidence_interval_CVM_i"][:, 1] #### Compute variance #### std_bootstrap_first_order = (upper_bound_first_order - First_order) / delta diff --git a/tests/unit_tests/sensitivity/test_generalised_sobol.py b/tests/unit_tests/sensitivity/test_generalised_sobol.py index 3b5df3167..0b1f0919c 100644 --- a/tests/unit_tests/sensitivity/test_generalised_sobol.py +++ b/tests/unit_tests/sensitivity/test_generalised_sobol.py @@ -209,8 +209,10 @@ def bootstrap_generalised_sobol_index_variance( gen_sobol_i = computed_indices["gen_sobol_i"].ravel() gen_sobol_total_i = computed_indices["gen_sobol_total_i"].ravel() - upper_bound_first_order = computed_indices["CI_gen_sobol_i"][:, 1] - upper_bound_total_order = computed_indices["CI_gen_sobol_total_i"][:, 1] + upper_bound_first_order = computed_indices["confidence_interval_gen_sobol_i"][:, 1] + upper_bound_total_order = computed_indices["confidence_interval_gen_sobol_total_i"][ + :, 1 + ] std_bootstrap_first_order = (upper_bound_first_order - gen_sobol_i) / delta std_bootstrap_total_order = (upper_bound_total_order - gen_sobol_total_i) / delta diff --git a/tests/unit_tests/sensitivity/test_sobol.py b/tests/unit_tests/sensitivity/test_sobol.py index 3c1f11700..ff26801da 100644 --- a/tests/unit_tests/sensitivity/test_sobol.py +++ b/tests/unit_tests/sensitivity/test_sobol.py @@ -183,12 +183,14 @@ def bootstrap_sobol_index_variance(sobol_object, NUM_SAMPLES): First_order = computed_indices["sobol_i"].ravel() Total_order = computed_indices["sobol_total_i"].ravel() - CI_first_order = computed_indices["CI_sobol_i"] - CI_total_order = computed_indices["CI_sobol_total_i"] + confidence_interval_first_order = computed_indices["confidence_interval_sobol_i"] + confidence_interval_total_order = computed_indices[ + "confidence_interval_sobol_total_i" + ] #### Compute variance #### - upper_bound_first_order = CI_first_order[:, 1] - upper_bound_total_order = CI_total_order[:, 1] + upper_bound_first_order = confidence_interval_first_order[:, 1] + upper_bound_total_order = confidence_interval_total_order[:, 1] std_bootstrap_first_order = (upper_bound_first_order - First_order) / delta std_bootstrap_total_order = (upper_bound_total_order - Total_order) / delta From caffb2d07770a4c295e94b525bcb27a956bb3f32 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 23 May 2022 15:17:33 +0200 Subject: [PATCH 
45/88] Changed variable name: CI -> confidence interval --- .../code/sensitivity/chatterjee/plot_chatterjee_ishigami.py | 2 +- docs/code/sensitivity/sobol/plot_sobol_ishigami.py | 4 ++-- docs/source/sensitivity/cramer_von_mises.rst | 2 +- docs/source/sensitivity/sobol.rst | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py index 448309e3a..21803cc16 100644 --- a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py +++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py @@ -63,7 +63,7 @@ # **Confidence intervals for the Chatterjee indices** # %% -computed_indices["CI_chatterjee_i"] +computed_indices["confidence_interval_chatterjee_i"] # %% [markdown] # **Estimated Sobol indices** diff --git a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py index dc118034f..dc1ce0c62 100644 --- a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py +++ b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py @@ -105,10 +105,10 @@ # **Confidence intervals for first order Sobol indices** # %% -computed_indices["CI_sobol_i"] +computed_indices["confidence_interval_sobol_i"] # %% [markdown] # **Confidence intervals for total order Sobol indices** # %% -computed_indices["CI_sobol_total_i"] +computed_indices["confidence_interval_sobol_total_i"] diff --git a/docs/source/sensitivity/cramer_von_mises.rst b/docs/source/sensitivity/cramer_von_mises.rst index 1958c128a..8ee05455a 100644 --- a/docs/source/sensitivity/cramer_von_mises.rst +++ b/docs/source/sensitivity/cramer_von_mises.rst @@ -50,7 +50,7 @@ Methods Attributes """""""""" .. autoattribute:: UQpy.sensitivity.CramervonMises.CVM_i -.. autoattribute:: UQpy.sensitivity.CramervonMises.CI_CVM_i +.. autoattribute:: UQpy.sensitivity.CramervonMises.confidence_interval_CVM_i .. autoattribute:: UQpy.sensitivity.CramervonMises.sobol_i .. autoattribute:: UQpy.sensitivity.CramervonMises.sobol_total_i .. autoattribute:: UQpy.sensitivity.CramervonMises.n_samples diff --git a/docs/source/sensitivity/sobol.rst b/docs/source/sensitivity/sobol.rst index 45f20b612..fee11e54e 100644 --- a/docs/source/sensitivity/sobol.rst +++ b/docs/source/sensitivity/sobol.rst @@ -80,9 +80,9 @@ Attributes """""""""" .. autoattribute:: UQpy.sensitivity.Sobol.sobol_i .. autoattribute:: UQpy.sensitivity.Sobol.sobol_total_i -.. autoattribute:: UQpy.sensitivity.Sobol.CI_sobol_i -.. autoattribute:: UQpy.sensitivity.Sobol.CI_sobol_total_i -.. autoattribute:: UQpy.sensitivity.Sobol.CI_sobol_ij +.. autoattribute:: UQpy.sensitivity.Sobol.confidence_interval_sobol_i +.. autoattribute:: UQpy.sensitivity.Sobol.confidence_interval_sobol_total_i +.. autoattribute:: UQpy.sensitivity.Sobol.confidence_interval_sobol_ij .. autoattribute:: UQpy.sensitivity.Sobol.n_samples .. autoattribute:: UQpy.sensitivity.Sobol.num_vars .. 
autoattribute:: UQpy.sensitivity.Sobol.multioutput From f3b59350063dd7a2c9809e265efddc4108bc76bb Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 23 May 2022 15:18:47 +0200 Subject: [PATCH 46/88] Added references to bibliography for Chatterjee --- docs/code/sensitivity/chatterjee/README.rst | 10 +-- docs/source/bibliography.bib | 73 ++++++++++++++++----- docs/source/sensitivity/chatterjee.rst | 7 +- src/UQpy/sensitivity/Chatterjee.py | 2 +- 4 files changed, 64 insertions(+), 28 deletions(-) diff --git a/docs/code/sensitivity/chatterjee/README.rst b/docs/code/sensitivity/chatterjee/README.rst index 540581862..5e48ef1a9 100644 --- a/docs/code/sensitivity/chatterjee/README.rst +++ b/docs/code/sensitivity/chatterjee/README.rst @@ -4,16 +4,12 @@ These examples serve as a guide for using the Chatterjee sensitivity module. The 1. **Ishigami function** - In addition to the Pick and Freeze scheme, the Sobol indices can be estimated using the rank statistics approach [2]_. We demonstrate this estimation of the Sobol indices using the Ishigami function. + In addition to the Pick and Freeze scheme, the Sobol indices can be estimated using the rank statistics approach :cite:`gamboa2020global`. We demonstrate this estimation of the Sobol indices using the Ishigami function. 2. **Exponential function** - For the Exponential model, analytical Cramér-von Mises indices are available [1]_ and since they are equivalent to the Chatterjee indices in the sample limit, they are shown here. + For the Exponential model, analytical Cramér-von Mises indices are available :cite:`CVM` and since they are equivalent to the Chatterjee indices in the sample limit, they are shown here. 3. **Sobol function** - This example was considered in [2]_ (page 18) to compare the Pick and Freeze scheme with the rank statistics approach for estimating the Sobol indices. - -.. [1] Gamboa, F., Klein, T., & Lagnoux, A. (2018). Sensitivity Analysis Based on Cramér-von Mises Distance. SIAM/ASA Journal on Uncertainty Quantification, 6(2), 522-548. doi:10.1137/15M1025621. (`Link `_) - -.. [2] Fabrice Gamboa, Pierre Gremaud, Thierry Klein, and Agnès Lagnoux. (2020). Global Sensitivity Analysis: a new generation of mighty estimators based on rank statistics. + This example was considered in :cite:`gamboa2020global` (page 18) to compare the Pick and Freeze scheme with the rank statistics approach for estimating the Sobol indices. diff --git a/docs/source/bibliography.bib b/docs/source/bibliography.bib index de41a858b..3d2c3a281 100644 --- a/docs/source/bibliography.bib +++ b/docs/source/bibliography.bib @@ -465,22 +465,6 @@ @article{Stretch2 pages={306–312} } -@article{Morris1, -title = {An effective screening design for sensitivity analysis of large models}, -journal = {Environmental Modelling & Software}, -volume = {22}, -number = {10}, -pages = {1509-1518}, -year = {2007}, -note = {Modelling, computer-assisted simulations, and mapping of dangerous phenomena for hazard assessment}, -issn = {1364-8152}, -doi = {https://doi.org/10.1016/j.envsoft.2006.10.004}, -url = {https://www.sciencedirect.com/science/article/pii/S1364815206002805}, -author = {Francesca Campolongo and Jessica Cariboni and Andrea Saltelli}, -keywords = {Sensitivity analysis, Screening problem, Model-free methods, Effective sampling strategy, Dimethylsulphide (DMS)}, -abstract = {In 1991 Morris proposed an effective screening sensitivity measure to identify the few important factors in models with many factors.
The method is based on computing for each input a number of incremental ratios, namely elementary effects, which are then averaged to assess the overall importance of the input. Despite its value, the method is still rarely used and instead local analyses varying one factor at a time around a baseline point are usually employed. In this piece of work we propose a revised version of the elementary effects method, improved in terms of both the definition of the measure and the sampling strategy. In the present form the method shares many of the positive qualities of the variance-based techniques, having the advantage of a lower computational cost, as demonstrated by the analytical examples. The method is employed to assess the sensitivity of a chemical reaction model for dimethylsulphide (DMS), a gas involved in climate change. Results of the sensitivity analysis open up the ground for model reconsideration: some model components may need a more thorough modelling effort while some others may need to be simplified.} -} - @article{StochasticProcess1, title = {Digital simulation of random processes and its applications}, journal = {Journal of Sound and Vibration}, @@ -744,3 +728,60 @@ @article{dsilva2018parsimonious year={2018}, publisher={Elsevier} } + +################ Sensitivity Analysis ######################## + +# Morris +@article{Morris1, +title = {An effective screening design for sensitivity analysis of large models}, +journal = {Environmental Modelling & Software}, +volume = {22}, +number = {10}, +pages = {1509-1518}, +year = {2007}, +note = {Modelling, computer-assisted simulations, and mapping of dangerous phenomena for hazard assessment}, +issn = {1364-8152}, +doi = {https://doi.org/10.1016/j.envsoft.2006.10.004}, +url = {https://www.sciencedirect.com/science/article/pii/S1364815206002805}, +author = {Francesca Campolongo and Jessica Cariboni and Andrea Saltelli}, +keywords = {Sensitivity analysis, Screening problem, Model-free methods, Effective sampling strategy, Dimethylsulphide (DMS)}, +abstract = {In 1991 Morris proposed an effective screening sensitivity measure to identify the few important factors in models with many factors. The method is based on computing for each input a number of incremental ratios, namely elementary effects, which are then averaged to assess the overall importance of the input. Despite its value, the method is still rarely used and instead local analyses varying one factor at a time around a baseline point are usually employed. In this piece of work we propose a revised version of the elementary effects method, improved in terms of both the definition of the measure and the sampling strategy. In the present form the method shares many of the positive qualities of the variance-based techniques, having the advantage of a lower computational cost, as demonstrated by the analytical examples. The method is employed to assess the sensitivity of a chemical reaction model for dimethylsulphide (DMS), a gas involved in climate change. 
Results of the sensitivity analysis open up the ground for model reconsideration: some model components may need a more thorough modelling effort while some others may need to be simplified.} +} + +# Chatterjee +@article{Chatterjee, +author = {Sourav Chatterjee}, +title = {A New Coefficient of Correlation}, +journal = {Journal of the American Statistical Association}, +volume = {116}, +number = {536}, +pages = {2009-2022}, +year = {2021}, +publisher = {Taylor & Francis}, +doi = {10.1080/01621459.2020.1758115}, +URL = {https://doi.org/10.1080/01621459.2020.1758115}, +eprint = {https://doi.org/10.1080/01621459.2020.1758115} +} + +@misc{gamboa2020global, + title={Global Sensitivity Analysis: a new generation of mighty estimators based on rank statistics}, + author={Fabrice Gamboa and Pierre Gremaud and Thierry Klein and Agnès Lagnoux}, + year={2020}, + eprint={2003.01772}, + archivePrefix={arXiv}, + primaryClass={math.ST} +} + +# Cramér-von Mises index +@article{CVM, +author = {Gamboa, Fabrice and Klein, Thierry and Lagnoux, Agnès}, +title = {Sensitivity Analysis Based on Cramér--von Mises Distance}, +journal = {SIAM/ASA Journal on Uncertainty Quantification}, +volume = {6}, +number = {2}, +pages = {522-548}, +year = {2018}, +doi = {10.1137/15M1025621}, +URL = {https://doi.org/10.1137/15M1025621}, +eprint = {https://doi.org/10.1137/15M1025621}, +} diff --git a/docs/source/sensitivity/chatterjee.rst b/docs/source/sensitivity/chatterjee.rst index e2e8ce006..57921b4b2 100644 --- a/docs/source/sensitivity/chatterjee.rst +++ b/docs/source/sensitivity/chatterjee.rst @@ -1,9 +1,9 @@ Chatterjee indices ---------------------------------------- -The Chatterjee index measures the strength of the relationship between :math:`X` and :math:`Y` using rank statistics. +The Chatterjee index measures the strength of the relationship between :math:`X` and :math:`Y` using rank statistics :cite:`Chatterjee`. -Consider :math:`n` samples of random variables :math:`X` and :math:`Y`, with :math:`(X_{(1)}, Y_{(1)}), \ldots,(X_{(n)}, Y_{(n)})` such that :math:`X_{(1)} \leq \cdots \leq X_{(n)}`. Here, random variable :math:`X` can be one of the inputs of a model and :math:`Y` be the model response. If the :math:`X_{i}`'s have no ties, there is a unique way of doing this (the case of ties is also taken into account in the implementation, see [1]_). Let :math:`r_{i}` be the rank of :math:`Y_{(i)}`, that is, the number of :math:`j` such that :math:`Y_{(j)} \leq Y_{(i)}`. The Chatterjee index :math:`\xi_{n}(X, Y)` is defined as: +Consider :math:`n` samples of random variables :math:`X` and :math:`Y`, with :math:`(X_{(1)}, Y_{(1)}), \ldots,(X_{(n)}, Y_{(n)})` such that :math:`X_{(1)} \leq \cdots \leq X_{(n)}`. Here, random variable :math:`X` can be one of the inputs of a model and :math:`Y` be the model response. If the :math:`X_{i}`'s have no ties, there is a unique way of doing this (the case of ties is also taken into account in the implementation, see :cite:`Chatterjee`). Let :math:`r_{i}` be the rank of :math:`Y_{(i)}`, that is, the number of :math:`j` such that :math:`Y_{(j)} \leq Y_{(i)}`. The Chatterjee index :math:`\xi_{n}(X, Y)` is defined as: .. math:: \xi_{n}(X, Y):=1-\frac{3 \sum_{i=1}^{n-1}\left|r_{i+1}-r_{i}\right|}{n^{2}-1} The Chatterjee index converges for :math:`n \rightarrow \infty` to the Cramér-von Mises index and is faster to estimate than using the Pick and Freeze approach in the Cramér-von Mises index.
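As an illustration (not part of this patch), a minimal NumPy sketch of the rank estimator defined above for the no-ties case; ``chatterjee_xi`` is a hypothetical helper name, and UQpy's ``Chatterjee`` class additionally handles ties and bootstrapped confidence intervals::

    import numpy as np

    def chatterjee_xi(x, y):
        """Estimate xi_n(X, Y) from paired samples, assuming no ties."""
        y_sorted = y[np.argsort(x)]               # sort the pairs by x
        r = np.argsort(np.argsort(y_sorted)) + 1  # rank r_i of each y value
        n = len(x)
        return 1.0 - 3.0 * np.abs(np.diff(r)).sum() / (n**2 - 1)

    rng = np.random.default_rng(0)
    x = rng.uniform(-np.pi, np.pi, 10_000)
    print(chatterjee_xi(x, np.sin(x)))                # near 1: Y is a function of X
    print(chatterjee_xi(x, rng.normal(size=10_000)))  # near 0: X and Y independent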
-.. [1] Sourav Chatterjee (2021) A New Coefficient of Correlation, Journal of the American Statistical Association, 116:536, 2009-2022, DOI: 10.1080/01621459.2020.1758115 (`Link `_) Chatterjee Class ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -29,7 +28,7 @@ Attributes """""""""" .. autoattribute:: UQpy.sensitivity.Chatterjee.chatterjee_i .. autoattribute:: UQpy.sensitivity.Chatterjee.sobol_i -.. autoattribute:: UQpy.sensitivity.Chatterjee.CI_chatterjee_i +.. autoattribute:: UQpy.sensitivity.Chatterjee.confidence_interval_chatterjee_i .. autoattribute:: UQpy.sensitivity.Chatterjee.num_vars .. autoattribute:: UQpy.sensitivity.Chatterjee.n_samples diff --git a/src/UQpy/sensitivity/Chatterjee.py b/src/UQpy/sensitivity/Chatterjee.py index a171e6247..53c470b5b 100644 --- a/src/UQpy/sensitivity/Chatterjee.py +++ b/src/UQpy/sensitivity/Chatterjee.py @@ -283,7 +283,7 @@ def rank_analog_to_pickfreeze( ): r""" Computing the :math:`N(j)` for each :math:`j \in \{1, \ldots, n\}` - as in eq.(8) in [6]_, where :math:`n` is the size of :math:`X`. + as in eq.(8) in :cite:`gamboa2020global`, where :math:`n` is the size of :math:`X`. .. math:: :nowrap: From e3f0d7a6661af28d59c9c2ed5a5abf265f17f37d Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 23 May 2022 15:23:05 +0200 Subject: [PATCH 47/88] Added references to bibliography for CVM --- docs/code/sensitivity/cramer_von_mises/README.rst | 6 ++---- docs/source/sensitivity/cramer_von_mises.rst | 8 ++------ 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/docs/code/sensitivity/cramer_von_mises/README.rst b/docs/code/sensitivity/cramer_von_mises/README.rst index b87758792..59036863e 100644 --- a/docs/code/sensitivity/cramer_von_mises/README.rst +++ b/docs/code/sensitivity/cramer_von_mises/README.rst @@ -4,10 +4,8 @@ These examples serve as a guide for using the Cramér-von Mises sensitivity modu 1. **Exponential function** - For the Exponential model, analytical Cramér-von Mises indices are available [1]_. + For the Exponential model, analytical Cramér-von Mises indices are available :cite:`CVM`. 2. **Sobol function** - The Cramér-von Mises indices are computed using the Pick and Freeze approach [1]_. These model evaluations can be used to estimate the Sobol indices as well. We demonstrate this using the Sobol function. - -.. [1] Gamboa, F., Klein, T., & Lagnoux, A. (2018). Sensitivity Analysis Based on Cramér-von Mises Distance. SIAM/ASA Journal on Uncertainty Quantification, 6(2), 522-548. doi:10.1137/15M1025621. (`Link `_) \ No newline at end of file + The Cramér-von Mises indices are computed using the Pick and Freeze approach :cite:`CVM`. These model evaluations can be used to estimate the Sobol indices as well. We demonstrate this using the Sobol function. diff --git a/docs/source/sensitivity/cramer_von_mises.rst b/docs/source/sensitivity/cramer_von_mises.rst index 8ee05455a..22477cbc2 100644 --- a/docs/source/sensitivity/cramer_von_mises.rst +++ b/docs/source/sensitivity/cramer_von_mises.rst @@ -1,7 +1,7 @@ Cramér-von Mises indices ---------------------------------------- -A sensitivity index based on the Cramér-von Mises distance. In contrast to the variance based Sobol indices, it takes into account the whole distribution of the model output and is therefore considered as a moment-free method [1]_. Furthermore the index can be naturally extended to multivariate model outputs (not implemented yet in UQPy). +A sensitivity index based on the Cramér-von Mises distance.
In contrast to the variance based Sobol indices, it takes into account the whole distribution of the model output and is therefore considered as a moment-free method :cite:`CVM`. Furthermore the index can be naturally extended to multivariate model outputs (not implemented yet in UQPy). Consider a model :math:`Y=f(X): \mathbb{R}^d \rightarrow \mathbb{R}^k` with :math:`d` inputs :math:`X_{(1)}, X_{(2)}, \ldots, X_{(d)}` and :math:`k` outputs :math:`Y_{(1)}, Y_{(2)}, \ldots, Y_{(k)}`. We define the cumulative distribution function :math:`F(t)` of :math:`Y` as: @@ -29,11 +29,7 @@ and the total Cramér-von Mises index :math:`S_{2, C V M}^{T o t, i}` (for input S_{2, C V M}^{T o t, i}:=1-S_{2, C V M}^{\sim i}=1-\frac{\int_{\mathbb{R}^{k}} \mathbb{E}\left[\left(F(t)-F^{\sim i}(t)\right)^{2}\right] d F(t)}{\int_{\mathbb{R}^{k}} F(t)(1-F(t)) d F(t)} -The above first and total order indices are estimated using the Pick-and-Freeze approach. This requires :math:`N(d+2)` model evaluations, where :math:`N` is the number of samples. (For implementation details, see also [2]_.) - -.. [1] Gamboa, F., Klein, T., & Lagnoux, A. (2018). Sensitivity Analysis Based on Cramér-von Mises Distance. SIAM/ASA Journal on Uncertainty Quantification, 6(2), 522-548. doi:10.1137/15M1025621. (`Link `_) - -.. [2] Gamboa, F., Gremaud, P., Klein, T., & Lagnoux, A. (2020). Global Sensitivity Analysis: a new generation of mighty estimators based on rank statistics. arXiv [math.ST]. (`Link `_) +The above first and total order indices are estimated using the Pick-and-Freeze approach. This requires :math:`N(d+2)` model evaluations, where :math:`N` is the number of samples. (For implementation details, see also :cite:`gamboa2020global`.)
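As an illustration (not part of this patch), a hypothetical sketch of this Pick-and-Freeze scheme: for a fixed :math:`t` the numerator is the first order Sobol index of the indicator :math:`1\{Y \leq t\}`, and :math:`t` is then averaged over an independent sample of the output; the model and helper names below are assumptions, not the UQpy API::

    import numpy as np

    def cvm_first_order(f, sample_inputs, n, i, rng):
        """Pick-and-freeze estimate of the first order Cramer-von Mises index."""
        A, B = sample_inputs(n, rng), sample_inputs(n, rng)
        C = B.copy()
        C[:, i] = A[:, i]                      # freeze column i from A
        t = f(sample_inputs(n, rng))[:, None]  # integration points t ~ F
        ind_A, ind_C = f(A) <= t, f(C) <= t    # indicator processes, shape (n, n)
        mean = 0.5 * (ind_A.mean(axis=1) + ind_C.mean(axis=1))
        numerator = ((ind_A & ind_C).mean(axis=1) - mean**2).mean()
        denominator = (mean * (1.0 - mean)).mean()
        return numerator / denominator

    rng = np.random.default_rng(1)
    gauss = lambda n, rng: rng.normal(size=(n, 2))
    f = lambda x: np.exp(x[:, 0] + 2.0 * x[:, 1])  # assumed exponential toy model
    print(cvm_first_order(f, gauss, 4_000, i=1, rng=rng))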
Cramér-von Mises Class ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ From 168f7d3e389ce93de2fcdd2ea8927603d00c8deb Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 23 May 2022 15:30:31 +0200 Subject: [PATCH 48/88] Added references to bibliography for GSI --- .../sensitivity/generalised_sobol/README.rst | 6 ++---- docs/source/bibliography.bib | 16 ++++++++++++++++ docs/source/sensitivity/generalised_sobol.rst | 5 +---- 3 files changed, 19 insertions(+), 8 deletions(-) diff --git a/docs/code/sensitivity/generalised_sobol/README.rst b/docs/code/sensitivity/generalised_sobol/README.rst index 78ede7984..44406106e 100644 --- a/docs/code/sensitivity/generalised_sobol/README.rst +++ b/docs/code/sensitivity/generalised_sobol/README.rst @@ -5,10 +5,8 @@ These examples serve as a guide for using the GSI sensitivity module. They have 1. **Mechanical oscillator ODE** - The GSI sensitivity indices are computed for a mechanical oscillator governed by a second-order differential equation [1]_. The model outputs the displacement of the oscillator for a given time period. Unlike the pointwise-in-time Sobol indices, which provide the sensitivity of the model parameters at each point in time, the GSI indices summarise the sensitivities of the model parameters over the entire time period. + The GSI sensitivity indices are computed for a mechanical oscillator governed by a second-order differential equation :cite:`GSI`. The model outputs the displacement of the oscillator for a given time period. Unlike the pointwise-in-time Sobol indices, which provide the sensitivity of the model parameters at each point in time, the GSI indices summarise the sensitivities of the model parameters over the entire time period. 2. **Toy example** - The GSI sensitivity indices are computed for a toy model whose analytical solution is given in [1]_. - -.. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others. Sensitivity analysis for multidimensional and functional outputs. Electronic journal of statistics 2014; 8(1): 575-603. \ No newline at end of file + The GSI sensitivity indices are computed for a toy model whose analytical solution is given in :cite:`GSI`. diff --git a/docs/source/bibliography.bib b/docs/source/bibliography.bib index 3d2c3a281..7bd9acd0b 100644 --- a/docs/source/bibliography.bib +++ b/docs/source/bibliography.bib @@ -785,3 +785,19 @@ @article{CVM URL = {https://doi.org/10.1137/15M1025621}, eprint = {https://doi.org/10.1137/15M1025621}, } + + +# Generalised Sobol index +@article{GSI, +author = {Fabrice Gamboa and Alexandre Janon and Thierry Klein and Agnès Lagnoux}, +title = {{Sensitivity analysis for multidimensional and functional outputs}}, +volume = {8}, +journal = {Electronic Journal of Statistics}, +number = {1}, +publisher = {Institute of Mathematical Statistics and Bernoulli Society}, +pages = {575 -- 603}, +keywords = {Concentration inequalities, quadratic functionals, Semi-parametric efficient estimation, sensitivity analysis, Sobol indices, temporal output, vector output}, +year = {2014}, +doi = {10.1214/14-EJS895}, +URL = {https://doi.org/10.1214/14-EJS895} +} diff --git a/docs/source/sensitivity/generalised_sobol.rst b/docs/source/sensitivity/generalised_sobol.rst index 402b3190e..1fcb5fd5a 100644 --- a/docs/source/sensitivity/generalised_sobol.rst +++ b/docs/source/sensitivity/generalised_sobol.rst @@ -1,7 +1,7 @@ Generalised Sobol indices ---------------------------------------- -A natural generalization of the Sobol indices (that are classically defined for single-output models) for multi-output models. The generalised Sobol indices are computed using the Pick-and-Freeze approach. (For implementation details, see also [1]_.) +A natural generalization of the Sobol indices (that are classically defined for single-output models) for multi-output models. The generalised Sobol indices are computed using the Pick-and-Freeze approach. (For implementation details, see also :cite:`GSI`.) Consider a model :math:`Y=f(X): \mathbb{R}^d \rightarrow \mathbb{R}^k` with :math:`d` inputs :math:`X=\left[ X_{1}, X_{2},\ldots,X_{d} \right]` and :math:`k` outputs :math:`Y=\left[ Y_{1}, Y_{2},\ldots,Y_{k} \right]`. @@ -47,9 +47,6 @@ and \Sigma_{N}=\frac{1}{N} \sum_{j=1}^{N} \frac{Y_{j} Y_{j}^{t}+Y_{j}^{\mathbf{i}}\left(Y_{j}^{\mathbf{i}}\right)^{t}}{2}-\left(\frac{1}{N} \sum_{j=1}^{N} \frac{Y_{j}+Y_{j}^{\mathbf{i}}}{2}\right)\left(\frac{1}{N} \sum_{j=1}^{N} \frac{Y_{j}+Y_{j}^{\mathbf{i}}}{2}\right)^{t}
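As an illustration (not part of this patch), a NumPy sketch of the resulting trace-based estimator, assuming the first order index is computed as :math:`\operatorname{tr}(C_{i,N})/\operatorname{tr}(\Sigma_{N})`; ``generalised_sobol_first_order`` is a hypothetical helper::

    import numpy as np

    def generalised_sobol_first_order(Y, Y_i):
        """Y, Y_i: pick-and-freeze output pairs of shape (N, k) for input i."""
        mu = 0.5 * (Y + Y_i).mean(axis=0)  # pooled output mean
        num = ((Y - mu) * (Y_i - mu)).sum(axis=1).mean()  # approximates tr(C_i,N)
        den = 0.5 * (((Y - mu) ** 2 + (Y_i - mu) ** 2).sum(axis=1)).mean()  # approximates tr(Sigma_N)
        return num / den

    rng = np.random.default_rng(2)
    X_A, X_B = rng.normal(size=(2, 50_000, 2))
    X_C = X_B.copy()
    X_C[:, 0] = X_A[:, 0]  # freeze X_1 from X_A
    f = lambda x: np.stack([x[:, 0], x[:, 0] + x[:, 1]], axis=1)  # toy vector output
    print(generalised_sobol_first_order(f(X_A), f(X_C)))  # analytically 2/3 here

For a scalar output (:math:`k=1`) this sketch reduces to the classical first order pick-and-freeze estimator.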
-.. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others. Sensitivity analysis for multidimensional and functional outputs. Electronic journal of statistics 2014; 8(1): 575-603.(`Link `_) - - Generalised Sobol Class ^^^^^^^^^^^^^^^^^^^^^^^^^^ From dd6393f5155fafffdb5f2337090ac97bd5e22a40 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 23 May 2022 15:38:25 +0200 Subject: [PATCH 49/88] Added references to bibliography for Sobol --- docs/source/bibliography.bib | 27 +++++++++++++++++++++++++++ docs/source/sensitivity/sobol.rst | 15 +++++----------- 2 files changed, 32 insertions(+), 10 deletions(-) diff --git a/docs/source/bibliography.bib b/docs/source/bibliography.bib index 7bd9acd0b..f46fc199a 100644 --- a/docs/source/bibliography.bib +++ b/docs/source/bibliography.bib @@ -801,3 +801,30 @@ @article{GSI doi = {10.1214/14-EJS895}, URL = {https://doi.org/10.1214/14-EJS895} } + +# Sobol +@book{saltelli_2008, + author = {Saltelli, A.}, + description = {Global sensitivity analysis: the primer - Andrea Saltelli}, + isbn = {9780470059975}, + keywords = {sensitivity statistics}, + lccn = {2007045551}, + publisher = {John Wiley}, + title = {Global sensitivity analysis: the primer}, + url = {https://onlinelibrary.wiley.com/doi/book/10.1002/9780470725184}, + year = 2008 +} + +@article{saltelli_2002, +title = {Making best use of model evaluations to compute sensitivity indices}, +journal = {Computer Physics Communications}, +volume = {145}, +number = {2}, +pages = {280-297}, +year = {2002}, +issn = {0010-4655}, +doi = {https://doi.org/10.1016/S0010-4655(02)00280-1}, +url = {https://www.sciencedirect.com/science/article/pii/S0010465502002801}, +author = {Andrea Saltelli}, +keywords = {Sensitivity analysis, Sensitivity measures, Sensitivity indices, Importance measures}, +} \ No newline at end of file diff --git a/docs/source/sensitivity/sobol.rst b/docs/source/sensitivity/sobol.rst index fee11e54e..60469b28c 100644 --- a/docs/source/sensitivity/sobol.rst +++ b/docs/source/sensitivity/sobol.rst @@ -31,7 +31,7 @@ Here, :math:`N` is the number of Monte Carlo samples and :math:`m` being the num Compared to "Sobol1993", the "Janon2014" estimator makes more efficient use of model evaluations and produces smaller (better) confidence intervals. -- Sobol1993: Requires :math:`N(m + 1)` model evaluations [1]_. +- Sobol1993: Requires :math:`N(m + 1)` model evaluations :cite:`saltelli_2008`. .. math:: S_{i} = \frac{\mathbb{V}\left[E\left(Y \mid X_{i}\right)\right]}{\mathbb{V}(Y)} = \frac{ (1/N) Y_{A} \cdot Y_{C_{i}}-f_{0}^{2}}{(1 / N) Y_{A} \cdot Y_{A}-f_{0}^{2}} .. math:: y_{A}=f(A), \quad y_{C_{i}}=f(C_{i}), \quad f_{0}^{2}=\left(\frac{1}{N} \sum_{j=1}^{N} y_{A}^{(j)} \right)^{2} -- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [2]_. +- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations :cite:`saltelli_2002`. 2. **Second order indices** (:math:`S_{ij}`) -- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [2]_. +- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations :cite:`saltelli_2002`. 3. **Total order indices** (:math:`S_{T_{i}}`) -- Homma1996: Requires :math:`N(m + 1)` model evaluations [1]_. +- Homma1996: Requires :math:`N(m + 1)` model evaluations :cite:`saltelli_2008`. .. math:: S_{T_{i}} = 1 - \frac{\mathbb{V}\left[E\left(Y \mid \mathbf{X}_{\sim_{i}}\right)\right]}{\mathbb{V}(Y)} = 1 - \frac{ (1 / N) Y_{B} \cdot Y_{C_{i}}-f_{0}^{2}}{(1 / N) Y_{A} \cdot Y_{A}-f_{0}^{2}} ..
math:: y_{A}=f(A), \quad y_{B}=f(B), \quad y_{C_{i}}=f(C_{i}), \quad f_{0}^{2}=\left(\frac{1}{2N} \sum_{j=1}^{N} y_{A}^{(j)} + y_{B}^{(j)} \right)^{2} -- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations [2]_. +- Saltelli2002: Requires :math:`N(2m + 2)` model evaluations :cite:`saltelli_2002`. -.. [1] Saltelli, A. (2008). Global sensitivity analysis: the primer. - John Wiley. ISBN: 9780470059975 - -.. [2] Saltelli, A. (2002). Making best use of model evaluations to compute sensitivity indices. (`Link `_) -
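To make the two estimators quoted above concrete, an illustrative, self-contained sketch (``pick_freeze_indices`` is a hypothetical helper, not the UQpy API)::

    import numpy as np

    def pick_freeze_indices(y_A, y_B, y_Ci):
        """Sobol1993 first order and Homma1996 total order estimates."""
        N = len(y_A)
        f0_sq = y_A.mean() ** 2                # Sobol1993 normalisation
        S_i = (y_A @ y_Ci / N - f0_sq) / (y_A @ y_A / N - f0_sq)
        f0_sq = ((y_A + y_B).mean() / 2) ** 2  # Homma1996 normalisation
        S_Ti = 1 - (y_B @ y_Ci / N - f0_sq) / (y_A @ y_A / N - f0_sq)
        return S_i, S_Ti

    rng = np.random.default_rng(3)
    A, B = rng.normal(size=(2, 100_000, 3))
    C_1 = B.copy()
    C_1[:, 0] = A[:, 0]                          # freeze X_1 from A
    f = lambda x: x @ np.array([1.0, 2.0, 3.0])  # additive test model
    print(pick_freeze_indices(f(A), f(B), f(C_1)))  # both near 1/14 for X_1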
Sobol Class ^^^^^^^^^^^^^^^^^^ From 08d954fcaed264a414c5f946d9788ac0145c374a Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 6 Jun 2022 16:45:37 +0200 Subject: [PATCH 50/88] Added PostProcess module for sensitivity indices. For postprocessing sensitivity studies. Currently supports bar plots for the indices. --- src/UQpy/sensitivity/PostProcess.py | 322 ++++++++++++++++++++++++++++ 1 file changed, 322 insertions(+) create mode 100644 src/UQpy/sensitivity/PostProcess.py diff --git a/src/UQpy/sensitivity/PostProcess.py b/src/UQpy/sensitivity/PostProcess.py new file mode 100644 index 000000000..77e17bfde --- /dev/null +++ b/src/UQpy/sensitivity/PostProcess.py @@ -0,0 +1,322 @@ +""" +This module is used to post-process the sensitivity analysis results. Currently it +supports plotting the sensitivity results and comparing the sensitivity results +(such as the first order index vs. the total order index) using the following two methods: + + 1. plot_index + 2. compare_index + +""" + +import math +import itertools + +import numpy as np +import matplotlib.pyplot as plt + + +def plot_sensitivity_index( + indices, + confidence_interval=None, + plot_title=None, + variable_names=None, + **kwargs, +): + + """ + + This function plots the sensitivity indices (with confidence intervals) + in a bar plot. + + **Inputs:** + + * **indices** (list or ndarray): + list/array of sensitivity indices + Shape: (num_vars) + + * **confidence_interval** (list or ndarray): + list/array of confidence intervals for the sensitivity indices. + Shape: (num_vars, 2) + + * **plot_title** (str): + Title of the plot + Default: "Sensitivity index" + + * **variable_names** (list): + List of variable names + Default: [r"$X_{}$".format(i) for i in range(num_vars)] + + * **kwargs** (dict): + Keyword arguments for the plot to be passed to matplotlib.pyplot.bar + + """ + + num_vars = len(indices) + + if variable_names is None: + variable_names = [r"$X_{}$".format(i + 1) for i in range(num_vars)] + + # Check if confidence intervals are available + if confidence_interval is not None: + conf_int_flag = True + error = confidence_interval[:, 1] - indices + else: + conf_int_flag = False + + # x and y data + _idx = np.arange(num_vars) + + indices = np.around(indices, decimals=2) # round to 2 decimal places + + # Plot one index + fig, ax = plt.subplots() + width = 0.3 + ax.spines["top"].set_visible(False) + ax.spines["right"].set_visible(False) + + index_bar = ax.bar( + _idx, # x-axis + indices, # y-axis + width=width, # bar width + yerr=error if conf_int_flag else None, # error bars + ecolor="k", # error bar color + capsize=5, # error bar cap size in pt + **kwargs, + ) + + ax.bar_label(index_bar, label_type="edge", fontsize=10) + ax.set_xticks(_idx, variable_names) + ax.set_xlabel("Model inputs") + ax.set_ylim(top=1) # set only upper limit of y to 1 + ax.set_title(plot_title) + + plt.show() + + return fig, ax + + +def plot_index_comparison( + indices_1, + indices_2, + confidence_interval_1=None, + confidence_interval_2=None, + label_1=None, + label_2=None, + plot_title="Sensitivity index", + variable_names=None, + **kwargs, +): + + """ + + This function plots two sensitivity indices (with confidence intervals) + in a bar plot for comparison. + For example: + first order Sobol indices and total order Sobol indices + OR + first order Sobol indices and Chatterjee indices. + + **Inputs:** + + * **indices_1** (list or ndarray): + list/array of sensitivity indices + Shape: (num_vars) + + * **indices_2** (list or ndarray): + list/array of sensitivity indices + Shape: (num_vars) + + * **confidence_interval_1** (list or ndarray): + list/array of confidence intervals for the sensitivity indices. + Shape: (num_vars, 2) + Default: None + + * **confidence_interval_2** (list or ndarray): + list/array of confidence intervals for the sensitivity indices.
+ Shape: (num_vars, 2) + Default: None + + * **plot_title** (str): + Title of the plot + + * **variable_names** (list): + List of variable names + Default: [r"$X_{}$".format(i) for i in range(num_vars)] + + * **kwargs** (dict): + Keyword arguments for the plot to be passed to matplotlib.pyplot.bar + + """ + + if indices_1 is None or indices_2 is None: + raise ValueError("Please provide two indices to plot") + + if len(indices_1) != len(indices_2): + raise ValueError("indices_1 and indices_2 should have the same length") + + num_vars = len(indices_1) + + if variable_names is None: + variable_names = [r"$X_{}$".format(i + 1) for i in range(num_vars)] + + # Check if confidence intervals are available + if confidence_interval_1 is not None: + conf_int_flag_1 = True + error_1 = confidence_interval_1[:, 1] - indices_1 + else: + conf_int_flag_1 = False + + if confidence_interval_2 is not None: + conf_int_flag_2 = True + error_2 = confidence_interval_2[:, 1] - indices_2 + else: + conf_int_flag_2 = False + + # x and y data + _idx = np.arange(num_vars) + + indices_1 = np.around(indices_1, decimals=2) # round to 2 decimal places + + if indices_2 is not None: + indices_2 = np.around(indices_2, decimals=2) # round to 2 decimal places + + # Plot two indices side by side + fig, ax = plt.subplots() + width = 0.3 + ax.spines["top"].set_visible(False) + ax.spines["right"].set_visible(False) + + bar_indices_1 = ax.bar( + _idx - width / 2, # x-axis + indices_1, # y-axis + width=width, # bar width + color="C0", # bar color + # alpha=0.5, # bar transparency + label=label_1, # bar label + yerr=error_1 if conf_int_flag_1 else None, + ecolor="k", # error bar color + capsize=5, # error bar cap size in pt + ) + + bar_indices_2 = ax.bar( + _idx + width / 2, # x-axis + indices_2, # y-axis + width=width, # bar width + color="C1", # bar color + # alpha=0.5, # bar transparency + label=label_2, # bar label + yerr=error_2 if conf_int_flag_2 else None, + ecolor="k", # error bar color + capsize=5, # error bar cap size in pt + ) + + ax.bar_label(bar_indices_1, label_type="edge", fontsize=10) + ax.bar_label(bar_indices_2, label_type="edge", fontsize=10) + ax.set_xticks(_idx, variable_names) + ax.set_xlabel("Model inputs") + ax.set_title(plot_title) + ax.set_ylim(top=1) # set only upper limit of y to 1 + ax.legend() + + plt.show() + + return fig, ax + + +def plot_second_order_indices( + indices, + num_vars, + confidence_interval=None, + plot_title="Second order indices", + variable_names=None, + **kwargs, +): + + """ + + This function plots second order indices (with confidence intervals) + in a bar plot. + + **Inputs:** + + * **indices** (list or ndarray): + list/array of second order indices + Shape: (num_second_order_terms) + + * **confidence_interval** (list or ndarray): + list/array of confidence intervals for the second order indices. + Shape: (num_second_order_terms, 2) + + * **label** (str): + Label of the plot + + * **plot_title** (str): + Title of the plot + + * **variable_names** (list): + List of variable names + Default: (Assumes that the indices are in lexicographic order.)
+ [r"$X_{}$".format(i) for i in range(n_parameters)] + + * **kwargs (dict): + Keyword arguments for the plot to be passed to matplotlib.pyplot.bar + + """ + + num_second_order_terms = len(indices) + + if variable_names is None: + variable_names = [r"$X_{}$".format(i + 1) for i in range(num_vars)] + + # All combinations of variables + all_combs = list(itertools.combinations(variable_names, 2)) + + # # Create a list of all combinations of variables + all_combs_list = [" ".join(comb) for comb in all_combs] + + # Check if confidence intervals are available + if confidence_interval is not None: + conf_int_flag = True + error = confidence_interval[:, 1] - indices + else: + conf_int_flag = False + + # x and y data + _idx = np.arange(num_second_order_terms) + + indices = np.around(indices, decimals=2) # round to 2 decimal places + + # Plot one index + fig, ax = plt.subplots() + width = 0.3 + ax.spines["top"].set_visible(False) + ax.spines["right"].set_visible(False) + + index_bar = ax.bar( + _idx, # x-axis + indices, # y-axis + width=width, # bar width + yerr=error if conf_int_flag else None, # error bars + ecolor="k", # error bar color + capsize=5, # error bar cap size in pt + **kwargs, + ) + + ax.bar_label(index_bar, label_type="edge", fontsize=10) + + ax.set_xticks(_idx, all_combs_list) + # generally, there are many second order terms + # so we need to make sure that the labels are + # not overlapping. We do this by rotating the labels + plt.setp( + ax.get_xticklabels(), + rotation=30, + horizontalalignment="right", + ) + ax.set_xlabel("Model inputs") + ax.set_ylim(top=1) # set only upper limit of y to 1 + ax.set_title(plot_title) + + plt.show() + + return fig, ax From d11e3955b0fc75166a39f6e87698a2e281304504 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 6 Jun 2022 16:46:03 +0200 Subject: [PATCH 51/88] Added plots in Sobol examples --- .../sensitivity/sobol/plot_sobol_additive.py | 15 ++++++++++ .../code/sensitivity/sobol/plot_sobol_func.py | 28 +++++++++++++++++++ .../sensitivity/sobol/plot_sobol_ishigami.py | 26 +++++++++++++++++ 3 files changed, 69 insertions(+) diff --git a/docs/code/sensitivity/sobol/plot_sobol_additive.py b/docs/code/sensitivity/sobol/plot_sobol_additive.py index 973e97dd6..51ee993c0 100644 --- a/docs/code/sensitivity/sobol/plot_sobol_additive.py +++ b/docs/code/sensitivity/sobol/plot_sobol_additive.py @@ -9,11 +9,16 @@ """ # %% +import numpy as np + from UQpy.run_model.RunModel import RunModel from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Normal from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.sensitivity.Sobol import Sobol +from UQpy.sensitivity.PostProcess import * + +np.random.seed(123) # %% [markdown] # **Define the model and input distributions** @@ -56,3 +61,13 @@ # %% computed_indices["sobol_i"] + +# %% +# **Plot the first and total order sensitivity indices** +fig1, ax1 = plot_index_comparison( + computed_indices["sobol_i"][:, 0], + computed_indices["sobol_total_i"][:, 0], + label_1="First order Sobol indices", + label_2="Total order Sobol indices", + plot_title="First and Total order Sobol indices", +) diff --git a/docs/code/sensitivity/sobol/plot_sobol_func.py b/docs/code/sensitivity/sobol/plot_sobol_func.py index 5a5cb9389..7c6058831 100644 --- a/docs/code/sensitivity/sobol/plot_sobol_func.py +++ b/docs/code/sensitivity/sobol/plot_sobol_func.py @@ -26,6 +26,9 @@ from UQpy.distributions import Uniform from UQpy.distributions.collection.JointIndependent import 
JointIndependent from UQpy.sensitivity.Sobol import Sobol +from UQpy.sensitivity.PostProcess import * + +np.random.seed(123) # %% [markdown] # **Define the model and input distributions** @@ -76,6 +79,14 @@ # %% computed_indices["sobol_i"] +# %% +# **Plot the first order sensitivity indices** +fig1, ax1 = plot_sensitivity_index( + computed_indices["sobol_i"][:, 0], + plot_title="First order Sobol indices", + color="C0", +) + # %% [markdown] # **Total order Sobol indices** # @@ -97,6 +108,16 @@ # %% computed_indices["sobol_total_i"] +# %% +# **Plot the first and total order sensitivity indices** +fig2, ax2 = plot_index_comparison( + computed_indices["sobol_i"][:, 0], + computed_indices["sobol_total_i"][:, 0], + label_1="First order Sobol indices", + label_2="Total order Sobol indices", + plot_title="First and Total order Sobol indices", +) + # %% [markdown] # **Second order Sobol indices** # @@ -134,3 +155,10 @@ # %% computed_indices["sobol_ij"] + +# %% +# **Plot the second order sensitivity indices** +fig3, ax3 = plot_second_order_indices( + computed_indices["sobol_ij"][:, 0], + num_vars=num_vars, +) diff --git a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py index dc1ce0c62..b63420290 100644 --- a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py +++ b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py @@ -46,6 +46,9 @@ from UQpy.distributions import Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.sensitivity.Sobol import Sobol +from UQpy.sensitivity.PostProcess import * + +np.random.seed(123) # %% [markdown] # **Define the model and input distributions** @@ -112,3 +115,26 @@ # %% computed_indices["confidence_interval_sobol_total_i"] + +# %% +# **Plot the first order sensitivity indices** +fig1, ax1 = plot_sensitivity_index( + computed_indices["sobol_i"][:, 0], + confidence_interval=computed_indices["confidence_interval_sobol_i"], + plot_title="First order Sobol indices", + variable_names=["$X_1$", "$X_2$", "$X_3$"], + color="C0", +) + +# %% +# **Plot the first and total order sensitivity indices** +fig2, ax2 = plot_index_comparison( + computed_indices["sobol_i"][:, 0], + computed_indices["sobol_total_i"][:, 0], + confidence_interval_1=computed_indices["confidence_interval_sobol_i"], + confidence_interval_2=computed_indices["confidence_interval_sobol_total_i"], + label_1="First order Sobol indices", + label_2="Total order Sobol indices", + plot_title="First and Total order Sobol indices", + variable_names=["$X_1$", "$X_2$", "$X_3$"], +) From bb14e82c39af7f845df2c37d94f99687fb6728f9 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 6 Jun 2022 16:46:18 +0200 Subject: [PATCH 52/88] Added plots in Generalised Sobol examples --- ...ralised_sobol_mechanical_oscillator_ODE.py | 21 +++++++++ .../plot_generalised_sobol_multioutput.py | 45 ++++++++++++++++++- 2 files changed, 65 insertions(+), 1 deletion(-) diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py index 716c498f7..dabc3dfe9 100644 --- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py +++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py @@ -30,6 +30,9 @@ from UQpy.distributions import Uniform, Normal from UQpy.distributions.collection.JointIndependent import JointIndependent from 
UQpy.sensitivity.GeneralisedSobol import GeneralisedSobol +from UQpy.sensitivity.PostProcess import * + +np.random.seed(123) # %% [markdown] # **Define the model and input distributions** @@ -75,8 +78,26 @@ # %% computed_indices["gen_sobol_i"] +# **Plot the first order sensitivity indices** +fig1, ax1 = plot_sensitivity_index( + computed_indices["gen_sobol_i"][:, 0], + plot_title="First order Generalised Sobol indices", + variable_names=[r"$m$", "$c$", "$k$", "$\ell$"], + color="C0", +) + # %% [markdown] # **Total order Generalised Sobol indices** # %% computed_indices["gen_sobol_total_i"] + +# **Plot the first and total order sensitivity indices** +fig2, ax2 = plot_index_comparison( + computed_indices["gen_sobol_i"][:, 0], + computed_indices["gen_sobol_total_i"][:, 0], + label_1="First order", + label_2="Total order", + plot_title="First and Total order Generalised Sobol indices", + variable_names=[r"$m$", "$c$", "$k$", "$\ell$"], +) diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py index af4ca6ff3..0a2a7529c 100644 --- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py +++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py @@ -23,6 +23,9 @@ from UQpy.distributions import Uniform, Normal from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.sensitivity.GeneralisedSobol import GeneralisedSobol +from UQpy.sensitivity.PostProcess import * + +np.random.seed(123) # %% [markdown] # **Define the model and input distributions** @@ -64,9 +67,28 @@ # %% computed_indices["gen_sobol_i"] +# **Plot the first order sensitivity indices** +fig1, ax1 = plot_sensitivity_index( + computed_indices["gen_sobol_i"][:, 0], + confidence_interval=computed_indices["confidence_interval_gen_sobol_i"], + plot_title="First order Generalised Sobol indices", + color="C0", +) + # %% computed_indices["gen_sobol_total_i"] +# **Plot the first and total order sensitivity indices** +fig2, ax2 = plot_index_comparison( + computed_indices["gen_sobol_i"][:, 0], + computed_indices["gen_sobol_total_i"][:, 0], + confidence_interval_1=computed_indices["confidence_interval_gen_sobol_i"], + confidence_interval_2=computed_indices["confidence_interval_gen_sobol_total_i"], + label_1="First order", + label_2="Total order", + plot_title="First and Total order Generalised Sobol indices", +) + # %% [markdown] # **Compute generalised Sobol indices** @@ -75,7 +97,9 @@ SA = GeneralisedSobol(runmodel_obj, dist_object_2) -computed_indices = SA.run(n_samples=100_000) +computed_indices = SA.run( + n_samples=20_000, confidence_level=0.95, num_bootstrap_samples=5_00 +) # %% [markdown] # **First order Generalised Sobol indices** @@ -91,5 +115,24 @@ # %% computed_indices["gen_sobol_i"] +# **Plot the first order sensitivity indices** +fig3, ax3 = plot_sensitivity_index( + computed_indices["gen_sobol_i"][:, 0], + confidence_interval=computed_indices["confidence_interval_gen_sobol_i"], + plot_title="First order Generalised Sobol indices", + color="C0", +) + # %% computed_indices["gen_sobol_total_i"] + +# **Plot the first and total order sensitivity indices** +fig4, ax4 = plot_index_comparison( + computed_indices["gen_sobol_i"][:, 0], + computed_indices["gen_sobol_total_i"][:, 0], + confidence_interval_1=computed_indices["confidence_interval_gen_sobol_i"], + confidence_interval_2=computed_indices["confidence_interval_gen_sobol_total_i"], + 
label_1="First order", + label_2="Total order", + plot_title="First and Total order Generalised Sobol indices", +) From ef52484b831764408d5cbeffb21eaa748f3a6e71 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 6 Jun 2022 16:46:38 +0200 Subject: [PATCH 53/88] Added plots in CVM index examples --- .../cramer_von_mises/plot_cvm_exponential.py | 26 +++++++++++++++++++ .../cramer_von_mises/plot_cvm_sobol_func.py | 19 +++++++++++++- 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py index 28b390a47..589166732 100644 --- a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py +++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py @@ -21,6 +21,9 @@ from UQpy.distributions import Normal from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.sensitivity.CramervonMises import CramervonMises as cvm +from UQpy.sensitivity.PostProcess import * + +np.random.seed(123) # %% [markdown] # **Define the model and input distributions** @@ -59,6 +62,13 @@ # %% computed_indices["CVM_i"] +# **Plot the CVM indices** +fig1, ax1 = plot_sensitivity_index( + computed_indices["CVM_i"][:, 0], + plot_title="CramĆ©r-von Mises indices", + color="C4", +) + # %% [markdown] # **Estimated first order Sobol indices** # @@ -71,8 +81,24 @@ # %% computed_indices["sobol_i"] +# **Plot the first order Sobol indices** +fig2, ax2 = plot_sensitivity_index( + computed_indices["sobol_i"][:, 0], + plot_title="First order Sobol indices", + color="C0", +) + # %% [markdown] # **Estimated total order Sobol indices** # %% computed_indices["sobol_total_i"] + +# **Plot the first and total order sensitivity indices** +fig3, ax3 = plot_index_comparison( + computed_indices["sobol_i"][:, 0], + computed_indices["sobol_total_i"][:, 0], + label_1="First order Sobol indices", + label_2="Total order Sobol indices", + plot_title="First and Total order Sobol indices", +) diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py index 7500c7259..443073593 100644 --- a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py +++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py @@ -26,6 +26,9 @@ from UQpy.distributions import Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.sensitivity.CramervonMises import CramervonMises as cvm +from UQpy.sensitivity.PostProcess import * + +np.random.seed(123) # %% [markdown] # **Define the model and input distributions** @@ -54,7 +57,7 @@ SA = cvm(runmodel_obj, dist_object) # Compute Sobol indices using the pick and freeze algorithm -computed_indices = SA.run(n_samples=20_000, estimate_sobol_indices=True) +computed_indices = SA.run(n_samples=50_000, estimate_sobol_indices=True) # %% [markdown] # **CramĆ©r-von Mises indices** @@ -62,6 +65,13 @@ # %% computed_indices["CVM_i"] +# **Plot the CVM indices** +fig1, ax1 = plot_sensitivity_index( + computed_indices["CVM_i"][:, 0], + plot_title="CramĆ©r-von Mises indices", + color="C4", +) + # %% [markdown] # **Estimated Sobol indices** # @@ -81,3 +91,10 @@ # %% computed_indices["sobol_i"] + +# **Plot the first order Sobol indices** +fig2, ax2 = plot_sensitivity_index( + computed_indices["sobol_i"][:, 0], + plot_title="First order Sobol indices", + color="C0", +) From 564086bc89d8e1e81505509c3f697819bc91f9ea Mon Sep 17 00:00:00 2001 From: 
Prateek Bhustali Date: Mon, 6 Jun 2022 16:46:56 +0200 Subject: [PATCH 54/88] Added plots in Chatterjee index examples --- .../chatterjee/plot_chatterjee_exponential.py | 10 ++++++++++ .../chatterjee/plot_chatterjee_ishigami.py | 18 ++++++++++++++++++ .../chatterjee/plot_chatterjee_sobol_func.py | 17 +++++++++++++++++ 3 files changed, 45 insertions(+) diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py index 8fa879847..0c67c7452 100644 --- a/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py +++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py @@ -22,6 +22,9 @@ from UQpy.distributions import Normal from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.sensitivity.Chatterjee import Chatterjee +from UQpy.sensitivity.PostProcess import * + +np.random.seed(123) # %% [markdown] # **Define the model and input distributions** @@ -64,3 +67,10 @@ # %% computed_indices["chatterjee_i"] + +# **Plot the Chatterjee indices** +fig1, ax1 = plot_sensitivity_index( + computed_indices["chatterjee_i"][:, 0], + plot_title="Chatterjee indices", + color="C2", +) diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py index 21803cc16..d7f08dcc5 100644 --- a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py +++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py @@ -22,6 +22,9 @@ from UQpy.distributions import Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.sensitivity.Chatterjee import Chatterjee +from UQpy.sensitivity.PostProcess import * + +np.random.seed(123) # %% [markdown] # **Define the model and input distributions** @@ -65,6 +68,14 @@ # %% computed_indices["confidence_interval_chatterjee_i"] +# **Plot the Chatterjee indices** +fig1, ax1 = plot_sensitivity_index( + computed_indices["chatterjee_i"][:, 0], + computed_indices["confidence_interval_chatterjee_i"], + plot_title="Chatterjee indices", + color="C2", +) + # %% [markdown] # **Estimated Sobol indices** # @@ -78,3 +89,10 @@ # %% computed_indices["sobol_i"] + +# **Plot the first order Sobol indices** +fig2, ax2 = plot_sensitivity_index( + computed_indices["sobol_i"][:, 0], + plot_title="First order Sobol indices", + color="C0", +) diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py index 439ffaa85..28e42f635 100644 --- a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py +++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py @@ -33,6 +33,9 @@ from UQpy.distributions import Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent from UQpy.sensitivity.Chatterjee import Chatterjee +from UQpy.sensitivity.PostProcess import * + +np.random.seed(123) # %% [markdown] # **Define the model and input distributions** @@ -69,6 +72,13 @@ # %% computed_indices["chatterjee_i"] +# **Plot the Chatterjee indices** +fig1, ax1 = plot_sensitivity_index( + computed_indices["chatterjee_i"][:, 0], + plot_title="Chatterjee indices", + color="C2", +) + # %% [markdown] # **Estimated Sobol indices** # @@ -88,3 +98,10 @@ # %% computed_indices["sobol_i"] + +# **Plot the first order Sobol indices** +fig2, ax2 = plot_sensitivity_index( + computed_indices["sobol_i"][:, 0], + plot_title="First order Sobol indices", + color="C0", +) From 
0216941dad7ea51c8fd057c91b99f223a94f613b Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 6 Jun 2022 16:58:45 +0200 Subject: [PATCH 55/88] Added type hints to PostProcess module --- src/UQpy/sensitivity/PostProcess.py | 45 +++++++++++++++++------------ 1 file changed, 27 insertions(+), 18 deletions(-) diff --git a/src/UQpy/sensitivity/PostProcess.py b/src/UQpy/sensitivity/PostProcess.py index 77e17bfde..ade3edb39 100644 --- a/src/UQpy/sensitivity/PostProcess.py +++ b/src/UQpy/sensitivity/PostProcess.py @@ -8,18 +8,23 @@ """ -import math import itertools import numpy as np import matplotlib.pyplot as plt +from beartype import beartype +from UQpy.utilities.ValidationTypes import ( + NumpyFloatArray, +) + +@beartype def plot_sensitivity_index( - indices, - confidence_interval=None, - plot_title=None, - variable_names=None, + indices: NumpyFloatArray, + confidence_interval: NumpyFloatArray = None, + plot_title: str = None, + variable_names: list = None, **kwargs, ): @@ -95,15 +100,16 @@ def plot_sensitivity_index( return fig, ax +@beartype def plot_index_comparison( - indices_1, - indices_2, - confidence_interval_1=None, - confidence_interval_2=None, - label_1=None, - label_2=None, - plot_title="Sensitivity index", - variable_names=None, + indices_1: NumpyFloatArray, + indices_2: NumpyFloatArray, + confidence_interval_1: NumpyFloatArray = None, + confidence_interval_2: NumpyFloatArray = None, + label_1: str = None, + label_2: str = None, + plot_title: str = "Sensitivity index", + variable_names: list = None, **kwargs, ): @@ -196,6 +202,7 @@ def plot_index_comparison( yerr=error_1 if conf_int_flag_1 else None, ecolor="k", # error bar color capsize=5, # error bar cap size in pt + **kwargs, ) bar_indices_2 = ax.bar( @@ -208,6 +215,7 @@ def plot_index_comparison( yerr=error_2 if conf_int_flag_2 else None, ecolor="k", # error bar color capsize=5, # error bar cap size in pt + **kwargs, ) ax.bar_label(bar_indices_1, label_type="edge", fontsize=10) @@ -223,12 +231,13 @@ def plot_index_comparison( return fig, ax +@beartype def plot_second_order_indices( - indices, - num_vars, - confidence_interval=None, - plot_title="Second order indices", - variable_names=None, + indices: NumpyFloatArray, + num_vars: int, + confidence_interval: NumpyFloatArray = None, + plot_title: str = "Second order indices", + variable_names: list = None, **kwargs, ): From 624c37325e6f7ff9bf90047562e571f27d183152 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Mon, 6 Jun 2022 18:23:07 +0200 Subject: [PATCH 56/88] Fixed minor typo --- docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py | 2 +- docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py index 28e42f635..9da7d1cbb 100644 --- a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py +++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py @@ -63,7 +63,7 @@ # %% [markdown] SA = Chatterjee(runmodel_obj, dist_object) -# Compute Sobol indices using the pick and freeze algorithm +# Compute Chatterjee indices using the pick and freeze algorithm computed_indices = SA.run(n_samples=500_000, estimate_sobol_indices=True) # %% [markdown] diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py index 589166732..6f3f74b93 100644 --- 
a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py
+++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py
@@ -47,7 +47,7 @@
 # %%
 SA = cvm(runmodel_obj, dist_object)
 
-# Compute Sobol indices using the pick and freeze algorithm
+# Compute CVM indices using the pick and freeze algorithm
 computed_indices = SA.run(n_samples=20_000, estimate_sobol_indices=True)
 
 # %% [markdown]

From 7da93b1420e584141685a62df3e958199c02b8ee Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Mon, 6 Jun 2022 18:24:06 +0200
Subject: [PATCH 57/88] Added a basic comparison of sensitivity indices

---
 docs/code/sensitivity/comparison/README.rst   |   6 +
 .../sensitivity/comparison/local_additive.py  |  21 +++
 .../sensitivity/comparison/local_ishigami.py  |  23 +++
 .../sensitivity/comparison/plot_additive.py   | 147 ++++++++++++++++
 .../sensitivity/comparison/plot_ishigami.py   | 165 ++++++++++++++++++
 docs/source/conf.py                           |   2 +
 docs/source/sensitivity/index.rst             |   7 +
 7 files changed, 371 insertions(+)
 create mode 100644 docs/code/sensitivity/comparison/README.rst
 create mode 100644 docs/code/sensitivity/comparison/local_additive.py
 create mode 100644 docs/code/sensitivity/comparison/local_ishigami.py
 create mode 100644 docs/code/sensitivity/comparison/plot_additive.py
 create mode 100644 docs/code/sensitivity/comparison/plot_ishigami.py

diff --git a/docs/code/sensitivity/comparison/README.rst b/docs/code/sensitivity/comparison/README.rst
new file mode 100644
index 000000000..59f928b8b
--- /dev/null
+++ b/docs/code/sensitivity/comparison/README.rst
@@ -0,0 +1,6 @@
+Comparison of Sensitivity indices
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In this section we compare the sensitivity indices (Sobol, Cramér-von Mises and Chatterjee) available in the package, using the 'Ishigami function' and the 'Additive model' to illustrate the differences.
+
+In both examples, we note that the Cramér-von Mises indices and the Chatterjee indices are almost equal (as the Chatterjee indices converge to the Cramér-von Mises indices in the sample limit).
\ No newline at end of file
diff --git a/docs/code/sensitivity/comparison/local_additive.py b/docs/code/sensitivity/comparison/local_additive.py
new file mode 100644
index 000000000..a0893fa11
--- /dev/null
+++ b/docs/code/sensitivity/comparison/local_additive.py
@@ -0,0 +1,21 @@
+"""
+
+Auxiliary file
+==============================================
+
+"""
+
+import numpy as np
+
+
+def evaluate(X, params) -> np.array:
+    r"""A linear function that is used to demonstrate sensitivity indices.
+
+    .. 
math:: + f(x) = a \cdot x_1 + b \cdot x_2 + """ + a, b = params + + Y = a * X[:, 0] + b * X[:, 1] + + return Y diff --git a/docs/code/sensitivity/comparison/local_ishigami.py b/docs/code/sensitivity/comparison/local_ishigami.py new file mode 100644 index 000000000..e5af649fe --- /dev/null +++ b/docs/code/sensitivity/comparison/local_ishigami.py @@ -0,0 +1,23 @@ +""" + +Auxiliary file +============================================== + +""" + +import numpy as np + + +def evaluate(X, params=[7, 0.1]): + """Non-monotonic Ishigami-Homma three parameter test function""" + + a = params[0] + b = params[1] + + Y = ( + np.sin(X[:, 0]) + + a * np.power(np.sin(X[:, 1]), 2) + + b * np.power(X[:, 2], 4) * np.sin(X[:, 0]) + ) + + return Y diff --git a/docs/code/sensitivity/comparison/plot_additive.py b/docs/code/sensitivity/comparison/plot_additive.py new file mode 100644 index 000000000..7c34e28bb --- /dev/null +++ b/docs/code/sensitivity/comparison/plot_additive.py @@ -0,0 +1,147 @@ +""" + +Additive function +============================================== + +.. math:: + f(x) = a \cdot X_1 + b \cdot X_2, \quad X_1, X_2 \sim \mathcal{N}(0, 1), \quad a,b \in \mathbb{R} + +""" + +# %% +import numpy as np +import matplotlib.pyplot as plt + +from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.PythonModel import PythonModel +from UQpy.distributions import Normal +from UQpy.distributions.collection.JointIndependent import JointIndependent +from UQpy.sensitivity.Chatterjee import Chatterjee +from UQpy.sensitivity.CramervonMises import CramervonMises as cvm +from UQpy.sensitivity.Sobol import Sobol +from UQpy.sensitivity.PostProcess import * + +np.random.seed(123) + +# %% [markdown] +# **Define the model and input distributions** + +# Create Model object +a, b = 1, 2 + +model = PythonModel( + model_script="local_additive.py", + model_object_name="evaluate", + var_names=[ + "X_1", + "X_2", + ], + delete_files=True, + params=[a, b], +) + +runmodel_obj = RunModel(model=model) + +# Define distribution object +dist_object = JointIndependent([Normal(0, 1)] * 2) + +# %% [markdown] +# **Compute Sobol indices** + +# %% [markdown] +SA_sobol = Sobol(runmodel_obj, dist_object) + +computed_indices_sobol = SA_sobol.run(n_samples=50_000) + +# %% [markdown] +# **First order Sobol indices** +# +# Expected first order Sobol indices: +# +# :math:`\mathrm{S}_1 = \frac{a^2 \cdot \mathbb{V}[X_1]}{a^2 \cdot \mathbb{V}[X_1] + b^2 \cdot \mathbb{V}[X_2]} = \frac{1^2 \cdot 1}{1^2 \cdot 1 + 2^2 \cdot 1} = 0.2` +# +# :math:`\mathrm{S}_2 = \frac{b^2 \cdot \mathbb{V}[X_2]}{a^2 \cdot \mathbb{V}[X_1] + b^2 \cdot \mathbb{V}[X_2]} = \frac{2^2 \cdot 1}{1^2 \cdot 1 + 2^2 \cdot 1} = 0.8` + +# %% +computed_indices_sobol["sobol_i"] + +# %% [markdown] +# **Compute Chatterjee indices** + +# %% [markdown] +SA_chatterjee = Chatterjee(runmodel_obj, dist_object) + +computed_indices_chatterjee = SA_chatterjee.run(n_samples=50_000) + +# %% +computed_indices_chatterjee["chatterjee_i"] + +# %% +SA_cvm = cvm(runmodel_obj, dist_object) + +# Compute CVM indices using the pick and freeze algorithm +computed_indices_cvm = SA_cvm.run(n_samples=20_000, estimate_sobol_indices=True) + +# %% +computed_indices_cvm["CVM_i"] + +# %% +# **Plot all indices** + +num_vars = 2 +_idx = np.arange(num_vars) +variable_names = [r"$X_{}$".format(i + 1) for i in range(num_vars)] + +# round to 2 decimal places +indices_1 = np.around(computed_indices_sobol["sobol_i"][:, 0], decimals=2) +indices_2 = np.around(computed_indices_chatterjee["chatterjee_i"][:, 0], 
decimals=2)
+indices_3 = np.around(computed_indices_cvm["CVM_i"][:, 0], decimals=2)
+
+fig, ax = plt.subplots()
+width = 0.3
+ax.spines["top"].set_visible(False)
+ax.spines["right"].set_visible(False)
+
+bar_indices_1 = ax.bar(
+    _idx - width,  # x-axis
+    indices_1,  # y-axis
+    width=width,  # bar width
+    color="C0",  # bar color
+    # alpha=0.5,  # bar transparency
+    label="Sobol",  # bar label
+    ecolor="k",  # error bar color
+    capsize=5,  # error bar cap size in pt
+)
+
+bar_indices_2 = ax.bar(
+    _idx,  # x-axis
+    indices_2,  # y-axis
+    width=width,  # bar width
+    color="C2",  # bar color
+    # alpha=0.5,  # bar transparency
+    label="Chatterjee",  # bar label
+    ecolor="k",  # error bar color
+    capsize=5,  # error bar cap size in pt
+)
+
+bar_indices_3 = ax.bar(
+    _idx + width,  # x-axis
+    indices_3,  # y-axis
+    width=width,  # bar width
+    color="C3",  # bar color
+    # alpha=0.5,  # bar transparency
+    label="Cramér-von Mises",  # bar label
+    ecolor="k",  # error bar color
+    capsize=5,  # error bar cap size in pt
+)
+
+ax.bar_label(bar_indices_1, label_type="edge", fontsize=10)
+ax.bar_label(bar_indices_2, label_type="edge", fontsize=10)
+ax.bar_label(bar_indices_3, label_type="edge", fontsize=10)
+ax.set_xticks(_idx, variable_names)
+ax.set_xlabel("Model inputs")
+ax.set_title("Comparison of sensitivity indices")
+ax.set_ylim(top=1)  # set only upper limit of y to 1
+ax.legend()
+
+plt.show()
diff --git a/docs/code/sensitivity/comparison/plot_ishigami.py b/docs/code/sensitivity/comparison/plot_ishigami.py
new file mode 100644
index 000000000..116245734
--- /dev/null
+++ b/docs/code/sensitivity/comparison/plot_ishigami.py
@@ -0,0 +1,165 @@
+r"""
+
+Ishigami function
+==============================================
+
+The Ishigami function is a non-linear, non-monotonic function that is commonly used to
+benchmark uncertainty and sensitivity analysis methods.
+
+.. math::
+    f(x_1, x_2, x_3) = \sin(x_1) + a \cdot \sin^2(x_2) + b \cdot x_3^4 \sin(x_1)
+
+.. 
math::
+    x_1, x_2, x_3 \sim \mathcal{U}(-\pi, \pi), \quad a, b\in \mathbb{R}
+
+"""
+
+# %%
+import numpy as np
+
+from UQpy.run_model.RunModel import RunModel
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+from UQpy.distributions import Uniform
+from UQpy.distributions.collection.JointIndependent import JointIndependent
+from UQpy.sensitivity.Chatterjee import Chatterjee
+from UQpy.sensitivity.CramervonMises import CramervonMises as cvm
+from UQpy.sensitivity.Sobol import Sobol
+from UQpy.sensitivity.PostProcess import *
+
+np.random.seed(123)
+
+# %% [markdown]
+# **Define the model and input distributions**
+
+# %%
+# Create Model object
+model = PythonModel(
+    model_script="local_ishigami.py",
+    model_object_name="evaluate",
+    var_names=[r"$X_1$", "$X_2$", "$X_3$"],
+    delete_files=True,
+    params=[7, 0.1],
+)
+
+runmodel_obj = RunModel(model=model)
+
+# Define distribution object
+dist_object = JointIndependent([Uniform(-np.pi, 2 * np.pi)] * 3)
+
+# %% [markdown]
+# **Compute Sobol indices**
+
+# %%
+SA_sobol = Sobol(runmodel_obj, dist_object)
+
+computed_indices_sobol = SA_sobol.run(n_samples=100_000)
+
+# %% [markdown]
+# **First order Sobol indices**
+#
+# Expected first order Sobol indices:
+#
+# :math:`S_1` = 0.3139
+#
+# :math:`S_2` = 0.4424
+#
+# :math:`S_3` = 0.0
+
+# %%
+computed_indices_sobol["sobol_i"]
+
+# %% [markdown]
+# **Total order Sobol indices**
+#
+# Expected total order Sobol indices:
+#
+# :math:`S_{T_1}` = 0.55758886
+#
+# :math:`S_{T_2}` = 0.44241114
+#
+# :math:`S_{T_3}` = 0.24368366
+
+# %%
+computed_indices_sobol["sobol_total_i"]
+
+# %% [markdown]
+# **Compute Chatterjee indices**
+
+# %% [markdown]
+SA_chatterjee = Chatterjee(runmodel_obj, dist_object)
+
+computed_indices_chatterjee = SA_chatterjee.run(n_samples=50_000)
+
+# %%
+computed_indices_chatterjee["chatterjee_i"]
+
+# %% [markdown]
+# **Compute Cramér-von Mises indices**
+SA_cvm = cvm(runmodel_obj, dist_object)
+
+# Compute CVM indices using the pick and freeze algorithm
+computed_indices_cvm = SA_cvm.run(n_samples=20_000, estimate_sobol_indices=True)
+
+# %%
+computed_indices_cvm["CVM_i"]
+
+# %%
+# **Plot all indices**
+
+num_vars = 3
+_idx = np.arange(num_vars)
+variable_names = [r"$X_{}$".format(i + 1) for i in range(num_vars)]
+
+# round to 2 decimal places
+indices_1 = np.around(computed_indices_sobol["sobol_i"][:, 0], decimals=2)
+indices_2 = np.around(computed_indices_chatterjee["chatterjee_i"][:, 0], decimals=2)
+indices_3 = np.around(computed_indices_cvm["CVM_i"][:, 0], decimals=2)
+
+fig, ax = plt.subplots()
+width = 0.3
+ax.spines["top"].set_visible(False)
+ax.spines["right"].set_visible(False)
+
+bar_indices_1 = ax.bar(
+    _idx - width,  # x-axis
+    indices_1,  # y-axis
+    width=width,  # bar width
+    color="C0",  # bar color
+    # alpha=0.5,  # bar transparency
+    label="Sobol",  # bar label
+    ecolor="k",  # error bar color
+    capsize=5,  # error bar cap size in pt
+)
+
+bar_indices_2 = ax.bar(
+    _idx,  # x-axis
+    indices_2,  # y-axis
+    width=width,  # bar width
+    color="C2",  # bar color
+    # alpha=0.5,  # bar transparency
+    label="Chatterjee",  # bar label
+    ecolor="k",  # error bar color
+    capsize=5,  # error bar cap size in pt
+)
+
+bar_indices_3 = ax.bar(
+    _idx + width,  # x-axis
+    indices_3,  # y-axis
+    width=width,  # bar width
+    color="C3",  # bar color
+    # alpha=0.5,  # bar transparency
+    label="Cramér-von Mises",  # bar label
+    ecolor="k",  # error bar color
+    capsize=5,  # error bar cap size in pt
+)
+
+ax.bar_label(bar_indices_1, label_type="edge", fontsize=10)
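+# (Axes.bar_label, available in matplotlib >= 3.4, annotates each bar with the
+# rounded sensitivity value computed above; one call per index family.)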
+ax.bar_label(bar_indices_2, label_type="edge", fontsize=10) +ax.bar_label(bar_indices_3, label_type="edge", fontsize=10) +ax.set_xticks(_idx, variable_names) +ax.set_xlabel("Model inputs") +ax.set_title("Comparison of sensitivity indices") +ax.set_ylim(top=1) # set only upper limit of y to 1 +ax.legend() + +plt.show() diff --git a/docs/source/conf.py b/docs/source/conf.py index 68538001d..3c7c10316 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -95,6 +95,7 @@ "../code/sensitivity/cramer_von_mises", "../code/sensitivity/chatterjee", "../code/sensitivity/generalised_sobol", + "../code/sensitivity/comparison", "../code/stochastic_processes/bispectral", "../code/stochastic_processes/karhunen_loeve", "../code/stochastic_processes/spectral", @@ -133,6 +134,7 @@ "auto_examples/sensitivity/cramer_von_mises", "auto_examples/sensitivity/chatterjee", "auto_examples/sensitivity/generalised_sobol", + "auto_examples/sensitivity/comparison", "auto_examples/stochastic_processes/bispectral", "auto_examples/stochastic_processes/karhunen_loeve", "auto_examples/stochastic_processes/spectral", diff --git a/docs/source/sensitivity/index.rst b/docs/source/sensitivity/index.rst index 1b2a8367d..161cfd3b2 100644 --- a/docs/source/sensitivity/index.rst +++ b/docs/source/sensitivity/index.rst @@ -26,3 +26,10 @@ Sensitivity analysis comprises techniques focused on determining how the variati Morris Sensitivity Polynomial Chaos Sensitivity Sobol Sensitivity + +Examples +"""""""""" + +.. toctree:: + + Comparison of indices <../auto_examples/sensitivity/comparison/index> From 019d1cbde5eea3bdce663e61108713aa7d8fdccd Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Thu, 30 Jun 2022 16:28:22 -0400 Subject: [PATCH 58/88] Directory fix --- src/UQpy/run_model/model_execution/ThirdPartyModel.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/UQpy/run_model/model_execution/ThirdPartyModel.py b/src/UQpy/run_model/model_execution/ThirdPartyModel.py index 9b5c7b97e..548ed5a83 100644 --- a/src/UQpy/run_model/model_execution/ThirdPartyModel.py +++ b/src/UQpy/run_model/model_execution/ThirdPartyModel.py @@ -135,7 +135,7 @@ def create_model_execution_directory(self, model_dir, model_files): os.chdir(self.model_dir) - # self.logger.info("\nUQpy: The following directory has been created for model evaluations: \n" + self.model_dir) + self.logger.info("\nUQpy: The following directory has been created for model evaluations: \n" + self.model_dir) # Copy files from the model list to model run directory for file_name in model_files: full_file_name = os.path.join(self.parent_dir, file_name) @@ -144,8 +144,10 @@ def create_model_execution_directory(self, model_dir, model_files): else: new_dir_name = os.path.join(self.model_dir, os.path.basename(full_file_name)) shutil.copytree(full_file_name, new_dir_name) - # self.logger.info("\nUQpy: The model files have been copied to the following directory for evaluation: \n" - # + self.model_dir) + self.logger.info("\nUQpy: The model files have been copied to the following directory for evaluation: \n" + + self.model_dir) + parent_dir = os.path.dirname(self.model_dir) + os.chdir(parent_dir) def create_model_files_list(self, model_dir): model_files = [] From 08d9180144873404704e3f9f2d13990fdaeb4428 Mon Sep 17 00:00:00 2001 From: Prateek Bhustali Date: Sun, 3 Jul 2022 17:11:49 +0200 Subject: [PATCH 59/88] Minor fix --- docs/code/sensitivity/chatterjee/local_sobol_func.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/docs/code/sensitivity/chatterjee/local_sobol_func.py b/docs/code/sensitivity/chatterjee/local_sobol_func.py
index 1ccabc6dd..dea2e6714 100644
--- a/docs/code/sensitivity/chatterjee/local_sobol_func.py
+++ b/docs/code/sensitivity/chatterjee/local_sobol_func.py
@@ -27,7 +27,7 @@ def sensitivities(a_values):
 
     Total_order = np.zeros((dims, 1))
 
-    V_i = (3 * (1 + a_values) ** 2) ** (-1)
+    V_i = 1 / (3 * (1 + a_values) ** 2)
 
     total_variance = np.prod(1 + V_i) - 1
 

From dfa17e9390bffd59958716b29a03f1edebbdc2b8 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 3 Jul 2022 17:12:38 +0200
Subject: [PATCH 60/88] Added convergence study

---
 .../chatterjee/plot_chatterjee_sobol_func.py  | 81 ++++++++++++++++---
 1 file changed, 72 insertions(+), 9 deletions(-)

diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
index 9da7d1cbb..5659f6d53 100644
--- a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
+++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py
@@ -21,7 +21,10 @@
 .. math::
     x_i \sim \mathcal{U}(0, 1), \quad a_i \in \mathbb{R}.
 
-.. [1] Fabrice Gamboa, Pierre Gremaud, Thierry Klein, and Agnès Lagnoux. (2020). Global Sensitivity Analysis: a new generation of mighty estimators based on rank statistics.
+Finally, we also compare the convergence rate of the Pick and Freeze approach with the
+rank statistics approach as in [1]_.
+
+.. [1] Fabrice Gamboa, Pierre Gremaud, Thierry Klein, and Agnès Lagnoux. (2020). Global Sensitivity Analysis: a new generation of mighty estimators based on rank statistics. (`Link `_)
 
 """
 
@@ -33,6 +36,7 @@
 from UQpy.distributions import Uniform
 from UQpy.distributions.collection.JointIndependent import JointIndependent
 from UQpy.sensitivity.Chatterjee import Chatterjee
+from UQpy.sensitivity.Sobol import Sobol
 from UQpy.sensitivity.PostProcess import *
 
 np.random.seed(123)
@@ -42,7 +46,7 @@
 
 # Create Model object
 num_vars = 6
-a_vals = np.array([0.0, 0.5, 3.0, 9.0, 99.0, 99.0])
+a_vals = np.arange(1, num_vars+1, 1)
 
 model = PythonModel(
     model_script="local_sobol_func.py",
@@ -63,7 +67,7 @@
 # %% [markdown]
 SA = Chatterjee(runmodel_obj, dist_object)
 
-# Compute Chatterjee indices using the pick and freeze algorithm
+# Compute Chatterjee indices using rank statistics
 computed_indices = SA.run(n_samples=500_000, estimate_sobol_indices=True)
 
 # %% [markdown]
@@ -84,17 +88,17 @@
 #
 # Expected first order Sobol indices:
 #
-# :math:`S_1` = 5.86781190e-01
+# :math:`S_1` = 0.46067666
 #
-# :math:`S_2` = 2.60791640e-01
+# :math:`S_2` = 0.20474518
 #
-# :math:`S_3` = 3.66738244e-02
+# :math:`S_3` = 0.11516917
 #
-# :math:`S_4` = 5.86781190e-03
+# :math:`S_4` = 0.07370827
 #
-# :math:`S_5` = 5.86781190e-05
+# :math:`S_5` = 0.0511863
 #
-# :math:`S_6` = 5.86781190e-05
+# :math:`S_6` = 0.03760626
 
 # %%
 computed_indices["sobol_i"]
@@ -105,3 +109,62 @@
     plot_title="First order Sobol indices",
     color="C0",
 )
+
+# %% [markdown]
+# **Comparing convergence rate of rank statistics and the Pick and Freeze approach**
+#
+# In the Pick-Freeze estimation, several sample sizes N are considered:
+# N = 100, 500, 1000, 5000, 10000, 50000, and 100000.
+# The Pick-Freeze procedure requires (p + 1) samples of size N.
+# To have a fair comparison, the sample sizes considered in the estimation using
+# rank statistics are n = (p+1)N = 7N.
+# We observe that both methods converge and give precise results for large sample sizes.
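+#
+# As a quick sanity check of this bookkeeping (a sketch, not an additional
+# result): with p = 6 inputs here, a Pick-Freeze run at N = 10_000 consumes
+# (p + 1) * N = 70_000 model evaluations, which is why the rank-statistics
+# estimates below are computed with n_samples = sample_size * 7.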
+
+# %%
+
+# Compute indices values for equal number of model evaluations
+
+true_values = np.array([0.46067666,
+                        0.20474518,
+                        0.11516917,
+                        0.07370827,
+                        0.0511863,
+                        0.03760626])
+
+sample_sizes = [100, 500, 1_000, 5_000, 10_000, 50_000, 100_000]
+num_studies = len(sample_sizes)
+
+store_pick_freeze = np.zeros((num_vars, num_studies))
+store_rank_stats = np.zeros((num_vars, num_studies))
+
+SA_chatterjee = Chatterjee(runmodel_obj, dist_object)
+SA_sobol = Sobol(runmodel_obj, dist_object)
+
+for i, sample_size in enumerate(sample_sizes):
+
+    # Estimate using rank statistics
+    _indices = SA_chatterjee.run(n_samples=sample_size*7, estimate_sobol_indices=True)
+    store_rank_stats[:, i] = _indices["sobol_i"].ravel()
+
+    # Estimate using Pick and Freeze approach
+    _indices = SA_sobol.run(n_samples=sample_size)
+    store_pick_freeze[:, i] = _indices["sobol_i"].ravel()
+
+# %%
+
+## Convergence plot
+
+fig, ax = plt.subplots(2, 3, figsize=(30, 15))
+
+for k in range(num_vars):
+
+    i, j = divmod(k, 3)  # (built-in) divmod(a, b) returns a tuple (a // b, a % b)
+
+    ax[i][j].semilogx(sample_sizes, store_rank_stats[k, :], 'ro-', label='Chatterjee estimate')
+    ax[i][j].semilogx(sample_sizes, store_pick_freeze[k, :], 'bx-', label='Pick and Freeze estimate')
+    ax[i][j].hlines(true_values[k], 0, sample_sizes[-1], 'k', label='True indices')
+    ax[i][j].set_title(r'$S_' + str(k+1) + '$ = ' + str(np.round(true_values[k], 4)))
+
+plt.suptitle('Comparing convergence of the Chatterjee estimate and the Pick and Freeze approach')
+plt.legend()
+plt.show()

From 786c6e58b3ec37913ea2a9b6f84e2a149c86b44b Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 3 Jul 2022 17:13:09 +0200
Subject: [PATCH 61/88] Changed a_values in Sobol func

---
 .../cramer_von_mises/plot_cvm_sobol_func.py   | 21 ++++++++++++-------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py
index 443073593..8624faec7 100644
--- a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py
+++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py
@@ -16,6 +16,11 @@
 
 .. math::
     x_i \sim \mathcal{U}(0, 1), \quad a_i \in \mathbb{R}.
+
+The function was also used in the Chatterjee indices section to demonstrate the
+computation of the Chatterjee indices. We can clearly see that the two
+estimates are equivalent.
+
 """
 
 # %%
@@ -35,7 +40,7 @@
 
 # Create Model object
 num_vars = 6
-a_vals = np.array([0.0, 0.5, 3.0, 9.0, 99.0, 99.0])
+a_vals = np.arange(1, num_vars+1, 1)
 
 model = PythonModel(
     model_script="local_sobol_func.py",
@@ -56,7 +61,7 @@
 # %%
 SA = cvm(runmodel_obj, dist_object)
 
-# Compute Sobol indices using the pick and freeze algorithm
+# Compute Sobol indices using rank statistics
 computed_indices = SA.run(n_samples=50_000, estimate_sobol_indices=True)
 
 # %% [markdown]
@@ -77,17 +82,17 @@
 #
 # Expected first order Sobol indices:
 #
-# :math:`S_1` = 5.86781190e-01
+# :math:`S_1` = 0.46067666
 #
-# :math:`S_2` = 2.60791640e-01
+# :math:`S_2` = 0.20474518
 #
-# :math:`S_3` = 3.66738244e-02
+# :math:`S_3` = 0.11516917
 #
-# :math:`S_4` = 5.86781190e-03
+# :math:`S_4` = 0.07370827
 #
-# :math:`S_5` = 5.86781190e-05
+# :math:`S_5` = 0.0511863
 #
-# :math:`S_6` = 5.86781190e-05
+# :math:`S_6` = 0.03760626
 
 # %%
 computed_indices["sobol_i"]

From af5e2a57643d0fed0317a997daa41eee1330c167 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 3 Jul 2022 17:14:42 +0200
Subject: [PATCH 62/88] Minor fixes to docstring

---
 .../plot_generalised_sobol_mechanical_oscillator_ODE.py      | 5 ++++-
 .../generalised_sobol/plot_generalised_sobol_multioutput.py  | 5 +++++
 .../code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py | 5 ++++-
 docs/code/sensitivity/sobol/plot_sobol_additive.py           | 5 +++++
 docs/code/sensitivity/sobol/plot_sobol_func.py               | 5 +++++
 src/UQpy/sensitivity/Chatterjee.py                           | 4 ++--
 6 files changed, 25 insertions(+), 4 deletions(-)

diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py
index dabc3dfe9..104703c76 100644
--- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py
+++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py
@@ -3,7 +3,8 @@
 Mechanical oscillator model (multioutput)
 ==============================================
 
-The mechanical oscillator is governed by the following second-order ODE:
+In this example, we consider the mechanical oscillator governed by the following
+second-order ODE, as demonstrated in [1]_:
 
 .. math::
     m \ddot{x} + c \dot{x} + k x = 0
@@ -20,6 +21,8 @@
 parameters at each point in time, the GSI indices summarise the sensitivities of the
 model parameters over the entire time period.
 
+.. [1] Gamboa, F., Janon, A., Klein, T., & Lagnoux, A. (2014). Sensitivity analysis for multidimensional and functional outputs. Electronic Journal of Statistics, 8(1), 575-603.
+
 """
 
 # %%
diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py
index 0a2a7529c..7cdd78024 100644
--- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py
+++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py
@@ -3,6 +3,9 @@
 Toy multioutput function
 ==============================================
 
+In this example, we demonstrate the computation of the Generalised Sobol indices using
+the toy example in [1]_.
+
 .. math::
     Y = f (X_{1}, X_{2}) := \left(\begin{array}{c}
     X_{1}+X_{2}+X_{1} X_{2} \\
@@ -15,6 +18,8 @@
 .. math::
     \text{case 2: } X_1, X_2 \sim \mathcal{U}(0, 1)
 
+.. [1] Gamboa, F., Janon, A., Klein, T., & Lagnoux, A. (2014). Sensitivity analysis for multidimensional and functional outputs. 
Electronic Journal of Statistics, 8(1), 575-603.
+
 """
 
 # %%
diff --git a/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py b/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py
index 06d1a66b1..b15e65006 100644
--- a/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py
+++ b/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py
@@ -3,7 +3,8 @@
 Mechanical oscillator model (multioutput)
 ==============================================
 
-The mechanical oscillator is governed by the following second-order ODE:
+In this example, we consider the mechanical oscillator governed by the following
+second-order ODE, as demonstrated in [1]_:
 
 .. math::
     m \ddot{x} + c \dot{x} + k x = 0
@@ -20,6 +21,8 @@
 pointwise-in-time Sobol indices. These indices describe the sensitivity of the model
 parameters at each point in time.
 
+.. [1] Gamboa, F., Janon, A., Klein, T., & Lagnoux, A. (2014). Sensitivity analysis for multidimensional and functional outputs. Electronic Journal of Statistics, 8(1), 575-603.
+
 """
 
 # %%
diff --git a/docs/code/sensitivity/sobol/plot_sobol_additive.py b/docs/code/sensitivity/sobol/plot_sobol_additive.py
index 51ee993c0..0cb860d48 100644
--- a/docs/code/sensitivity/sobol/plot_sobol_additive.py
+++ b/docs/code/sensitivity/sobol/plot_sobol_additive.py
@@ -3,9 +3,14 @@
 Additive function
 ==============================================
 
+We introduce the variance-based Sobol indices using an elementary example.
+For more details, refer to [1]_.
+
 .. math::
     f(x) = a \cdot X_1 + b \cdot X_2, \quad X_1, X_2 \sim \mathcal{N}(0, 1), \quad a,b \in \mathbb{R}
 
+.. [1] Saltelli, A., et al. (2008). Global sensitivity analysis: The primer. John Wiley.
+
 """
 
 # %%
diff --git a/docs/code/sensitivity/sobol/plot_sobol_func.py b/docs/code/sensitivity/sobol/plot_sobol_func.py
index 7c6058831..505a23c04 100644
--- a/docs/code/sensitivity/sobol/plot_sobol_func.py
+++ b/docs/code/sensitivity/sobol/plot_sobol_func.py
@@ -16,6 +16,11 @@
 .. math::
     x_i \sim \mathcal{U}(0, 1), \quad a_i \in \mathbb{R}.
 
+This is an example from [1]_, where the first order, total order, and second
+order indices are computed.
+
+.. [1] Glen, G., & Isaacs, K. (2012). Estimating Sobol sensitivity indices using correlations. Environmental Modelling and Software, 37, 157–166.
+
 """
 
 # %%
diff --git a/src/UQpy/sensitivity/Chatterjee.py b/src/UQpy/sensitivity/Chatterjee.py
index 53c470b5b..f56b5cbc9 100644
--- a/src/UQpy/sensitivity/Chatterjee.py
+++ b/src/UQpy/sensitivity/Chatterjee.py
@@ -101,8 +101,8 @@ def run(
         """
         Compute the sensitivity indices using the Chatterjee method.
 
-        :param n_samples: Number of samples used to compute the Cramér-von Mises indices. \
-            Default is 1,000.
+        :param n_samples: Number of samples used to compute the Chatterjee indices. \
+            Default is 1,000.
 
         :param estimate_sobol_indices: If :code:`True`, the Sobol indices are estimated \
             using the pick-and-freeze samples. 
From ee1d892118a4a594a99519afd071497b4275f46c Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 3 Jul 2022 17:15:06 +0200
Subject: [PATCH 63/88] Minor changes in documentation

---
 docs/source/sensitivity/chatterjee.rst | 3 ++-
 docs/source/sensitivity/sobol.rst      | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/docs/source/sensitivity/chatterjee.rst b/docs/source/sensitivity/chatterjee.rst
index 57921b4b2..42ee716e8 100644
--- a/docs/source/sensitivity/chatterjee.rst
+++ b/docs/source/sensitivity/chatterjee.rst
@@ -9,8 +9,9 @@ Consider :math:`n` samples of random variables :math:`X` and :math:`Y`, with :ma
 
    \xi_{n}(X, Y):=1-\frac{3 \sum_{i=1}^{n-1}\left|r_{i+1}-r_{i}\right|}{n^{2}-1}
 
-The Chatterjee index converges for :math:`n \rightarrow \infty` to the Cramér-von Mises index and is faster to estimate than using the Pick and Freeze approach in the Cramér-von Mises index.
+The Chatterjee index converges for :math:`n \rightarrow \infty` to the Cramér-von Mises index and is faster to estimate than using the Pick and Freeze approach to compute the Cramér-von Mises index.
+Furthermore, the Sobol indices can be efficiently estimated by leveraging the same rank statistics, which has the advantage that any sample can be used and no specific pick and freeze scheme is required.
 
 Chatterjee Class
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/docs/source/sensitivity/sobol.rst b/docs/source/sensitivity/sobol.rst
index 60469b28c..4b482e81f 100644
--- a/docs/source/sensitivity/sobol.rst
+++ b/docs/source/sensitivity/sobol.rst
@@ -17,7 +17,7 @@ If the first order index of an input parameter is equal to the total order index
 
 The Sobol indices are typically computed using the Pick-and-Freeze approach for single output and multi-output models. Since there are several variants of the Pick-and-Freeze approach, the schemes implemented to compute Sobol indices are listed below:
 
-Here, :math:`N` is the number of Monte Carlo samples and :math:`m` being the number of input parameters in the model.
+Here, :math:`N` is the Monte Carlo sample size and :math:`m` is the number of input parameters in the model.
 
 1. **First order indices** (:math:`S_{i}`)
 

From e57f82dcdb2089869e7d942670733577378344d7 Mon Sep 17 00:00:00 2001
From: Prateek Bhustali
Date: Sun, 3 Jul 2022 17:32:12 +0200
Subject: [PATCH 64/88] Described why SA outputs are different

---
 docs/code/sensitivity/comparison/plot_additive.py | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/docs/code/sensitivity/comparison/plot_additive.py b/docs/code/sensitivity/comparison/plot_additive.py
index 7c34e28bb..623ffd641 100644
--- a/docs/code/sensitivity/comparison/plot_additive.py
+++ b/docs/code/sensitivity/comparison/plot_additive.py
@@ -3,9 +3,22 @@
 Additive function
 ==============================================
 
+We use an elementary example to intuitively convey the sensitivities according to
+different metrics.
+
 .. math::
     f(x) = a \cdot X_1 + b \cdot X_2, \quad X_1, X_2 \sim \mathcal{N}(0, 1), \quad a,b \in \mathbb{R}
 
+In the plot below, we note that the indices provide different sensitivities for the two
+inputs. The variance-based Sobol indices use variance as a metric to quantify
+sensitivity, whereas the Chatterjee/Cramér-von Mises indices use the entire probability
+distribution function (PDF) to quantify the sensitivity.
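+
+As a quick sanity check (restating the closed form derived later in this
+example): for independent :math:`X_1, X_2 \sim \mathcal{N}(0, 1)`,
+:math:`\mathbb{V}[f] = a^2 + b^2`, so the first order Sobol indices are
+:math:`S_1 = a^2/(a^2 + b^2) = 0.2` and :math:`S_2 = b^2/(a^2 + b^2) = 0.8`
+for :math:`a = 1, b = 2`.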
+In general, moment-free indices provide a more holistic measure of sensitivity
+than the variance-based indices, which are accurate mainly when the output
+distribution is close to a Gaussian (see [1]_ for a motivating example).
+
+.. [1] Borgonovo, E. (2006). Measuring uncertainty importance: Investigation and comparison of alternative approaches. Risk Analysis, 26(5), 1349-1361.
+
 """
 
 # %%
@@ -145,3 +158,4 @@
 ax.legend()
 
 plt.show()
+

From 02bbff761057c637862ccee41116169885a926aa Mon Sep 17 00:00:00 2001
From: Dimitris Tsapetis
Date: Thu, 21 Jul 2022 12:10:09 +0300
Subject: [PATCH 65/88] Minor example fixes

---
 .../bayes_model_selection.py                  | 105 ++----------------
 .../mcmc/plot_mcmc_metropolis_hastings.py     |   2 +-
 docs/code/surrogates/gpr/plot_gpr_noisy.py    |   6 +-
 3 files changed, 13 insertions(+), 100 deletions(-)

diff --git a/docs/code/inference/bayes_model_selection/bayes_model_selection.py b/docs/code/inference/bayes_model_selection/bayes_model_selection.py
index c5981af66..88850f1f4 100644
--- a/docs/code/inference/bayes_model_selection/bayes_model_selection.py
+++ b/docs/code/inference/bayes_model_selection/bayes_model_selection.py
@@ -46,8 +46,9 @@
 error_covariance = var_n * np.eye(50)
 print(param_true.shape)
 
-z = RunModel(samples=param_true, model_script='local_pfn_models.py', model_object_name='model_quadratic', vec=False,
-             var_names=['theta_1', 'theta_2'])
+from UQpy.run_model.model_execution.PythonModel import PythonModel
+m=PythonModel(model_script='local_pfn_models.py', model_object_name='model_quadratic', var_names=['theta_1', 'theta_2'])
+z = RunModel(samples=param_true, model=m)
 data_clean = z.qoi_list[0].reshape((-1,))
 data = data_clean + Normal(scale=np.sqrt(var_n)).rvs(nsamples=data_clean.size, random_state=456).reshape((-1,))
 
@@ -66,55 +67,10 @@
 
 model_prior_stds = [[10.], [1., 1.], [1., 2., 0.25]]
 
-evidences = []
-model_posterior_means = []
-model_posterior_stds = []
-for n, model in enumerate(model_names):
-    # compute matrix X
-    X = np.linspace(0, 10, 50).reshape((-1, 1))
-    if n == 1:  # quadratic model
-        X = np.concatenate([X, X ** 2], axis=1)
-    if n == 2:  # cubic model
-        X = np.concatenate([X, X ** 2, X ** 3], axis=1)
-
-    # compute posterior pdf
-    m_prior = np.array(model_prior_means[n]).reshape((-1, 1))
-    S_prior = np.diag(np.array(model_prior_stds[n]) ** 2)
-    S_posterior = np.linalg.inv(1 / var_n * np.matmul(X.T, X) + np.linalg.inv(S_prior))
-    m_posterior = np.matmul(S_posterior,
-                            1 / var_n * np.matmul(X.T, data.reshape((-1, 1))) + np.matmul(np.linalg.inv(S_prior),
-                                                                                          m_prior))
-    m_prior = m_prior.reshape((-1,))
-    m_posterior = m_posterior.reshape((-1,))
-    model_posterior_means.append(list(m_posterior))
-    model_posterior_stds.append(list(np.sqrt(np.diag(S_posterior))))
-    print('posterior mean and covariance for ' + model)
-    print(m_posterior, S_posterior)
-
-    # compute evidence, evaluate the formula at the posterior mean
-    like_theta = multivariate_normal.pdf(data, mean=np.matmul(X, m_posterior).reshape((-1,)), cov=error_covariance)
-    prior_theta = multivariate_normal.pdf(m_posterior, mean=m_prior, cov=S_prior)
-    posterior_theta = multivariate_normal.pdf(m_posterior, mean=m_posterior, cov=S_posterior)
-    evidence = like_theta * prior_theta / posterior_theta
-    evidences.append(evidence)
-    print('evidence for ' + model + '= {}\n'.format(evidence))
-
-# compute the posterior probability of each model
-tmp = [1 / 3 * evidence for evidence in evidences]
-model_posterior_probas = [p / sum(tmp) for p in tmp]
-
-print('posterior probabilities of all three models')
-print(model_posterior_probas) - -#%% md -# -# Define the models for use in UQpy - -#%% - candidate_models = [] for n, model_name in enumerate(model_names): - run_model = RunModel(model_script='local_pfn_models.py', model_object_name=model_name, vec=False) + m=PythonModel(model_script='local_pfn_models.py', model_object_name=model_name,) + run_model = RunModel(model=m) prior = JointIndependent([Normal(loc=m, scale=std) for m, std in zip(model_prior_means[n], model_prior_stds[n])]) model = ComputationalModel(n_parameters=model_n_params[n], @@ -123,60 +79,18 @@ name=model_name) candidate_models.append(model) - -#%% md -# -# Run MCMC for one model - -#%% - -# Quadratic model -sampling = MetropolisHastings(args_target=(data, ), - log_pdf_target=candidate_models[1].evaluate_log_posterior, - jump=10, burn_length=100, - proposal=JointIndependent([Normal(scale=0.1), ] * 2), - seed=[0., 0.], random_state=123) - -bayesMCMC = BayesParameterEstimation(sampling_class=sampling, - inference_model=candidate_models[1], - data=data, - nsamples=3500) - -# plot prior, true posterior and estimated posterior -fig, ax = plt.subplots(1, 2, figsize=(16, 5)) -for n_p in range(2): - domain_plot = np.linspace(-0.5, 3, 200) - ax[n_p].plot(domain_plot, norm.pdf(domain_plot, loc=model_prior_means[1][n_p], scale=model_prior_stds[1][n_p]), - label='prior', color='green', linestyle='--') - ax[n_p].plot(domain_plot, norm.pdf(domain_plot, loc=model_posterior_means[1][n_p], - scale=model_posterior_stds[1][n_p]), - label='true posterior', color='red', linestyle='-') - ax[n_p].hist(bayesMCMC.sampler.samples[:, n_p], density=True, bins=30, label='estimated posterior MCMC') - ax[n_p].legend() - ax[n_p].set_title('MCMC for quadratic model') -plt.show() - - -#%% md -# -# Run Bayesian Model Selection for all three models - -#%% - proposals = [Normal(scale=0.1), JointIndependent([Normal(scale=0.1), Normal(scale=0.1)]), JointIndependent([Normal(scale=0.15), Normal(scale=0.1), Normal(scale=0.05)])] -nsamples = [2000, 6000, 14000] -nburn = [500, 2000, 4000] -jump = [5, 10, 25] +nsamples = [2000, 2000, 2000] +nburn = [1000, 1000, 1000] +jump = [2, 2, 2] sampling_inputs=list() estimators = [] for i in range(3): - sampling = MetropolisHastings(args_target=(data, ), - log_pdf_target=candidate_models[i].evaluate_log_posterior, - jump=jump[i], + sampling = MetropolisHastings(jump=jump[i], burn_length=nburn[i], proposal=proposals[i], seed=model_prior_means[i], @@ -185,7 +99,6 @@ sampling_class=sampling)) selection = BayesModelSelection(parameter_estimators=estimators, - data=data, prior_probabilities=[1. / 3., 1. / 3., 1. 
/ 3.], nsamples=nsamples) diff --git a/docs/code/sampling/mcmc/plot_mcmc_metropolis_hastings.py b/docs/code/sampling/mcmc/plot_mcmc_metropolis_hastings.py index 4b433856e..c532f14eb 100644 --- a/docs/code/sampling/mcmc/plot_mcmc_metropolis_hastings.py +++ b/docs/code/sampling/mcmc/plot_mcmc_metropolis_hastings.py @@ -58,7 +58,7 @@ def log_rosenbrock_with_param(x, p): plt.show() plt.figure() -x = MetropolisHastings(dimension=2, pdf_target=log_rosenbrock_with_param, burn_length=500, +x = MetropolisHastings(dimension=2, log_pdf_target=log_rosenbrock_with_param, burn_length=500, jump=50, n_chains=1, args_target=(20,), nsamples=500) plt.plot(x.samples[:, 0], x.samples[:, 1], 'o') diff --git a/docs/code/surrogates/gpr/plot_gpr_noisy.py b/docs/code/surrogates/gpr/plot_gpr_noisy.py index 9f5b4883c..46d758274 100644 --- a/docs/code/surrogates/gpr/plot_gpr_noisy.py +++ b/docs/code/surrogates/gpr/plot_gpr_noisy.py @@ -25,8 +25,7 @@ warnings.filterwarnings('ignore') from UQpy.utilities.MinimizeOptimizer import MinimizeOptimizer from UQpy.surrogates.gaussian_process.regression_models.LinearRegression import LinearRegression -from UQpy.utilities.FminCobyla import FminCobyla -from UQpy.surrogates import GaussianProcessRegression, NonNegative, RBF +from UQpy.surrogates import GaussianProcessRegression, RBF # %% md @@ -83,7 +82,7 @@ def funct(x): # %% bounds_2 = [[10**(-3), 10**3], [10**(-3), 10**2], [10**(-3), 10**(2)]] -optimizer2 = MinimizeOptimizer(method='L-BFGS-B', bounds=bounds_2) +optimizer2 = MinimizeOptimizer(bounds=bounds_2) # %% md @@ -150,3 +149,4 @@ def funct(x): plt.title('GP Surrogate (Noise, No Constraints)') ax.legend(loc="upper right",prop={'size': 12}); plt.grid() +plt.show() From 813ae54e75164b74317695aeba218fe949b537dd Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Tue, 9 Aug 2022 12:11:46 +0300 Subject: [PATCH 66/88] Naming fixes --- .../chatterjee/plot_chatterjee_exponential.py | 4 +- .../chatterjee/plot_chatterjee_ishigami.py | 6 +- .../chatterjee/plot_chatterjee_sobol_func.py | 10 +- .../sensitivity/comparison/plot_additive.py | 10 +- .../sensitivity/comparison/plot_ishigami.py | 10 +- .../cramer_von_mises/plot_cvm_exponential.py | 2 +- .../cramer_von_mises/plot_cvm_sobol_func.py | 2 +- ...ralised_sobol_mechanical_oscillator_ODE.py | 4 +- .../plot_generalised_sobol_multioutput.py | 6 +- .../sobol/plot_mechanical_oscillator_ODE.py | 4 +- .../sensitivity/sobol/plot_sobol_additive.py | 4 +- .../code/sensitivity/sobol/plot_sobol_func.py | 4 +- .../sensitivity/sobol/plot_sobol_ishigami.py | 4 +- docs/source/sensitivity/sobol.rst | 4 + requirements.txt | 2 +- ...Chatterjee.py => ChatterjeeSensitivity.py} | 88 +++++--------- ...nMises.py => CramerVonMisesSensitivity.py} | 107 ++++++------------ ...obol.py => GeneralisedSobolSensitivity.py} | 84 ++++---------- .../{Sobol.py => SobolSensitivity.py} | 77 ++++++------- src/UQpy/sensitivity/__init__.py | 16 +-- src/UQpy/sensitivity/baseclass/Sensitivity.py | 75 ++++++------ .../unit_tests/sensitivity/test_baseclass.py | 8 +- .../unit_tests/sensitivity/test_chatterjee.py | 6 +- .../sensitivity/test_cramer_von_mises.py | 6 +- .../sensitivity/test_generalised_sobol.py | 6 +- tests/unit_tests/sensitivity/test_sobol.py | 8 +- 26 files changed, 226 insertions(+), 331 deletions(-) rename src/UQpy/sensitivity/{Chatterjee.py => ChatterjeeSensitivity.py} (84%) rename src/UQpy/sensitivity/{CramervonMises.py => CramerVonMisesSensitivity.py} (74%) rename src/UQpy/sensitivity/{GeneralisedSobol.py => GeneralisedSobolSensitivity.py} (83%) rename 
src/UQpy/sensitivity/{Sobol.py => SobolSensitivity.py} (94%) diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py index 0c67c7452..04cf4cf03 100644 --- a/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py +++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py @@ -21,7 +21,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Normal from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.Chatterjee import Chatterjee +from UQpy.sensitivity.ChatterjeeSensitivity import ChatterjeeSensitivity from UQpy.sensitivity.PostProcess import * np.random.seed(123) @@ -49,7 +49,7 @@ # **Compute Chatterjee indices** # %% [markdown] -SA = Chatterjee(runmodel_obj, dist_object) +SA = ChatterjeeSensitivity(runmodel_obj, dist_object) # Compute Chatterjee indices using the pick and freeze algorithm computed_indices = SA.run(n_samples=1_000_000) diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py index d7f08dcc5..696a07fee 100644 --- a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py +++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py @@ -21,7 +21,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.Chatterjee import Chatterjee +from UQpy.sensitivity.ChatterjeeSensitivity import ChatterjeeSensitivity from UQpy.sensitivity.PostProcess import * np.random.seed(123) @@ -47,12 +47,12 @@ # **Compute Chatterjee indices** # %% [markdown] -SA = Chatterjee(runmodel_obj, dist_object) +SA = ChatterjeeSensitivity(runmodel_obj, dist_object) computed_indices = SA.run( n_samples=100_000, estimate_sobol_indices=True, - num_bootstrap_samples=100, + n_bootstrap_samples=100, confidence_level=0.95, ) diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py index 5659f6d53..3d54ad38d 100644 --- a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py +++ b/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py @@ -35,8 +35,8 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.Chatterjee import Chatterjee -from UQpy.sensitivity.Sobol import Sobol +from UQpy.sensitivity.ChatterjeeSensitivity import ChatterjeeSensitivity +from UQpy.sensitivity.SobolSensitivity import SobolSensitivity from UQpy.sensitivity.PostProcess import * np.random.seed(123) @@ -65,7 +65,7 @@ # **Compute Chatterjee indices** # %% [markdown] -SA = Chatterjee(runmodel_obj, dist_object) +SA = ChatterjeeSensitivity(runmodel_obj, dist_object) # Compute Chatterjee indices using rank statistics computed_indices = SA.run(n_samples=500_000, estimate_sobol_indices=True) @@ -137,8 +137,8 @@ store_pick_freeze = np.zeros((num_vars, num_studies)) store_rank_stats = np.zeros((num_vars, num_studies)) -SA_chatterjee = Chatterjee(runmodel_obj, dist_object) -SA_sobol = Sobol(runmodel_obj, dist_object) +SA_chatterjee = ChatterjeeSensitivity(runmodel_obj, dist_object) +SA_sobol = SobolSensitivity(runmodel_obj, dist_object) for i, sample_size in 
enumerate(sample_sizes): diff --git a/docs/code/sensitivity/comparison/plot_additive.py b/docs/code/sensitivity/comparison/plot_additive.py index 623ffd641..8dd101a1f 100644 --- a/docs/code/sensitivity/comparison/plot_additive.py +++ b/docs/code/sensitivity/comparison/plot_additive.py @@ -29,9 +29,9 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Normal from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.Chatterjee import Chatterjee -from UQpy.sensitivity.CramervonMises import CramervonMises as cvm -from UQpy.sensitivity.Sobol import Sobol +from UQpy.sensitivity.ChatterjeeSensitivity import ChatterjeeSensitivity +from UQpy.sensitivity.CramerVonMisesSensitivity import CramerVonMisesSensitivity as cvm +from UQpy.sensitivity.SobolSensitivity import SobolSensitivity from UQpy.sensitivity.PostProcess import * np.random.seed(123) @@ -62,7 +62,7 @@ # **Compute Sobol indices** # %% [markdown] -SA_sobol = Sobol(runmodel_obj, dist_object) +SA_sobol = SobolSensitivity(runmodel_obj, dist_object) computed_indices_sobol = SA_sobol.run(n_samples=50_000) @@ -82,7 +82,7 @@ # **Compute Chatterjee indices** # %% [markdown] -SA_chatterjee = Chatterjee(runmodel_obj, dist_object) +SA_chatterjee = ChatterjeeSensitivity(runmodel_obj, dist_object) computed_indices_chatterjee = SA_chatterjee.run(n_samples=50_000) diff --git a/docs/code/sensitivity/comparison/plot_ishigami.py b/docs/code/sensitivity/comparison/plot_ishigami.py index 116245734..958cd5821 100644 --- a/docs/code/sensitivity/comparison/plot_ishigami.py +++ b/docs/code/sensitivity/comparison/plot_ishigami.py @@ -21,9 +21,9 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.Chatterjee import Chatterjee -from UQpy.sensitivity.CramervonMises import CramervonMises as cvm -from UQpy.sensitivity.Sobol import Sobol +from UQpy.sensitivity.ChatterjeeSensitivity import ChatterjeeSensitivity +from UQpy.sensitivity.CramerVonMisesSensitivity import CramerVonMisesSensitivity as cvm +from UQpy.sensitivity.SobolSensitivity import SobolSensitivity from UQpy.sensitivity.PostProcess import * np.random.seed(123) @@ -50,7 +50,7 @@ # **Compute Sobol indices** # %% -SA_sobol = Sobol(runmodel_obj, dist_object) +SA_sobol = SobolSensitivity(runmodel_obj, dist_object) computed_indices_sobol = SA_sobol.run(n_samples=100_000) @@ -86,7 +86,7 @@ # **Compute Chatterjee indices** # %% [markdown] -SA_chatterjee = Chatterjee(runmodel_obj, dist_object) +SA_chatterjee = ChatterjeeSensitivity(runmodel_obj, dist_object) computed_indices_chatterjee = SA_chatterjee.run(n_samples=50_000) diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py index 6f3f74b93..81b258bf1 100644 --- a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py +++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py @@ -20,7 +20,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Normal from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.CramervonMises import CramervonMises as cvm +from UQpy.sensitivity.CramerVonMisesSensitivity import CramerVonMisesSensitivity as cvm from UQpy.sensitivity.PostProcess import * np.random.seed(123) diff --git 
a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py index 8624faec7..a69ab0e99 100644 --- a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py +++ b/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py @@ -30,7 +30,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.CramervonMises import CramervonMises as cvm +from UQpy.sensitivity.CramerVonMisesSensitivity import CramerVonMisesSensitivity as cvm from UQpy.sensitivity.PostProcess import * np.random.seed(123) diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py index 104703c76..e9900578b 100644 --- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py +++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py @@ -32,7 +32,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform, Normal from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.GeneralisedSobol import GeneralisedSobol +from UQpy.sensitivity.GeneralisedSobolSensitivity import GeneralisedSobolSensitivity from UQpy.sensitivity.PostProcess import * np.random.seed(123) @@ -61,7 +61,7 @@ # **Compute generalised Sobol indices** # %% [markdown] -SA = GeneralisedSobol(runmodel_obj, dist_object) +SA = GeneralisedSobolSensitivity(runmodel_obj, dist_object) computed_indices = SA.run(n_samples=500) diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py index 7cdd78024..039817373 100644 --- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py +++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py @@ -27,7 +27,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform, Normal from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.GeneralisedSobol import GeneralisedSobol +from UQpy.sensitivity.GeneralisedSobolSensitivity import GeneralisedSobolSensitivity from UQpy.sensitivity.PostProcess import * np.random.seed(123) @@ -52,7 +52,7 @@ # **Compute generalised Sobol indices** # %% [markdown] -SA = GeneralisedSobol(runmodel_obj, dist_object_1) +SA = GeneralisedSobolSensitivity(runmodel_obj, dist_object_1) computed_indices = SA.run( n_samples=20_000, confidence_level=0.95, num_bootstrap_samples=5_00 @@ -100,7 +100,7 @@ # %% [markdown] dist_object_2 = JointIndependent([Uniform(0, 1)] * 2) -SA = GeneralisedSobol(runmodel_obj, dist_object_2) +SA = GeneralisedSobolSensitivity(runmodel_obj, dist_object_2) computed_indices = SA.run( n_samples=20_000, confidence_level=0.95, num_bootstrap_samples=5_00 diff --git a/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py b/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py index b15e65006..0403e8a54 100644 --- a/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py +++ b/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py @@ -33,7 +33,7 @@ from UQpy.run_model.model_execution.PythonModel import 
PythonModel
 from UQpy.distributions import Uniform
 from UQpy.distributions.collection.JointIndependent import JointIndependent
-from UQpy.sensitivity.Sobol import Sobol
+from UQpy.sensitivity.SobolSensitivity import SobolSensitivity
 
 # %% [markdown]
 # **Define the model and input distributions**
@@ -59,7 +59,7 @@
 # **Compute Sobol indices**
 
 # %% [markdown]
-SA = Sobol(runmodel_obj, dist_object)
+SA = SobolSensitivity(runmodel_obj, dist_object)
 
 computed_indices = SA.run(n_samples=500)
diff --git a/docs/code/sensitivity/sobol/plot_sobol_additive.py b/docs/code/sensitivity/sobol/plot_sobol_additive.py
index 0cb860d48..dca212fe6 100644
--- a/docs/code/sensitivity/sobol/plot_sobol_additive.py
+++ b/docs/code/sensitivity/sobol/plot_sobol_additive.py
@@ -20,7 +20,7 @@
 from UQpy.run_model.model_execution.PythonModel import PythonModel
 from UQpy.distributions import Normal
 from UQpy.distributions.collection.JointIndependent import JointIndependent
-from UQpy.sensitivity.Sobol import Sobol
+from UQpy.sensitivity.SobolSensitivity import SobolSensitivity
 from UQpy.sensitivity.PostProcess import *
 
 np.random.seed(123)
@@ -51,7 +51,7 @@
 # **Compute Sobol indices**
 
 # %% [markdown]
-SA = Sobol(runmodel_obj, dist_object)
+SA = SobolSensitivity(runmodel_obj, dist_object)
 
 computed_indices = SA.run(n_samples=50_000)
diff --git a/docs/code/sensitivity/sobol/plot_sobol_func.py b/docs/code/sensitivity/sobol/plot_sobol_func.py
index 505a23c04..ba28d3e79 100644
--- a/docs/code/sensitivity/sobol/plot_sobol_func.py
+++ b/docs/code/sensitivity/sobol/plot_sobol_func.py
@@ -30,7 +30,7 @@
 from UQpy.run_model.model_execution.PythonModel import PythonModel
 from UQpy.distributions import Uniform
 from UQpy.distributions.collection.JointIndependent import JointIndependent
-from UQpy.sensitivity.Sobol import Sobol
+from UQpy.sensitivity.SobolSensitivity import SobolSensitivity
 from UQpy.sensitivity.PostProcess import *
 
 np.random.seed(123)
@@ -59,7 +59,7 @@
 # **Compute Sobol indices**
 
 # %% [markdown]
-SA = Sobol(runmodel_obj, dist_object)
+SA = SobolSensitivity(runmodel_obj, dist_object)
 
 # Compute Sobol indices using the pick and freeze algorithm
 computed_indices = SA.run(n_samples=50_000, estimate_second_order=True)
diff --git a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py
index b63420290..dbedfde4c 100644
--- a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py
+++ b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py
@@ -45,7 +45,7 @@
 from UQpy.run_model.model_execution.PythonModel import PythonModel
 from UQpy.distributions import Uniform
 from UQpy.distributions.collection.JointIndependent import JointIndependent
-from UQpy.sensitivity.Sobol import Sobol
+from UQpy.sensitivity.SobolSensitivity import SobolSensitivity
 from UQpy.sensitivity.PostProcess import *
 
 np.random.seed(123)
@@ -72,7 +72,7 @@
 # **Compute Sobol indices**
 
 # %%
-SA = Sobol(runmodel_obj, dist_object)
+SA = SobolSensitivity(runmodel_obj, dist_object)
 
 computed_indices = SA.run(n_samples=100_000, num_bootstrap_samples=100)
diff --git a/docs/source/sensitivity/sobol.rst b/docs/source/sensitivity/sobol.rst
index 4b482e81f..56c704a96 100644
--- a/docs/source/sensitivity/sobol.rst
+++ b/docs/source/sensitivity/sobol.rst
@@ -63,6 +63,8 @@ Sobol Class
 
 The :class:`Sobol` class is imported using the following command:
 
+>>> from UQpy.sensitivity.SobolSensitivity import SobolSensitivity
+
 >>> from UQpy.sensitivity.Sobol import Sobol
 
 Methods
diff --git 
a/requirements.txt b/requirements.txt index 588ce75cf..1386fa38c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ numpy == 1.22.3 scipy == 1.8.0 -matplotlib == 3.3.3 +matplotlib == 3.5.2 scikit-learn == 1.0.2 fire == 0.4.0 pytest == 6.1.2 diff --git a/src/UQpy/sensitivity/Chatterjee.py b/src/UQpy/sensitivity/ChatterjeeSensitivity.py similarity index 84% rename from src/UQpy/sensitivity/Chatterjee.py rename to src/UQpy/sensitivity/ChatterjeeSensitivity.py index f56b5cbc9..00ef7a940 100644 --- a/src/UQpy/sensitivity/Chatterjee.py +++ b/src/UQpy/sensitivity/ChatterjeeSensitivity.py @@ -27,7 +27,7 @@ from numbers import Integral from UQpy.sensitivity.baseclass.Sensitivity import Sensitivity -from UQpy.sensitivity.Sobol import compute_first_order as compute_first_order_sobol +from UQpy.sensitivity.SobolSensitivity import compute_first_order as compute_first_order_sobol from UQpy.utilities.ValidationTypes import ( RandomStateType, PositiveInteger, @@ -38,7 +38,7 @@ from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter -class Chatterjee(Sensitivity): +class ChatterjeeSensitivity(Sensitivity): """ Compute sensitivity indices using the Chatterjee correlation coefficient. @@ -58,33 +58,23 @@ class Chatterjee(Sensitivity): **Methods:** """ - def __init__(self, runmodel_object, dist_object, random_state=None, **kwargs): - super().__init__( - runmodel_object, dist_object, random_state=random_state, **kwargs - ) + def __init__(self, runmodel_object, dist_object, random_state=None): + super().__init__(runmodel_object, dist_object, random_state=random_state) # Create logger with the same name as the class self.logger = logging.getLogger(__name__) - self.logger.setLevel(logging.ERROR) - frmt = UQpyLoggingFormatter() - # create console handler with a higher log level - ch = logging.StreamHandler() - ch.setFormatter(frmt) + self.first_order_chatterjee_indices = None + "Chatterjee sensitivity indices (First order), :class:`numpy.ndarray` of shape :code:`(n_variables, 1)`" - # add the handler to the logger - self.logger.addHandler(ch) + self.first_order_sobol_indices = None + "Sobol indices computed using the rank statistics, :class:`numpy.ndarray` of shape :code:`(n_variables, 1)`" - self.chatterjee_i = None - "Chatterjee sensitivity indices (First order), :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`" + self.confidence_interval_chatterjee = None + "Confidence intervals for the Chatterjee sensitivity indices, :class:`numpy.ndarray` of " \ + "shape :code:`(n_variables, 2)`" - self.sobol_i = None - "Sobol indices computed using the rank statistics, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`" - - self.confidence_interval_chatterjee_i = None - "Confidence intervals for the Chatterjee sensitivity indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 2)`" - - self.num_vars = None + self.n_variables = None "Number of input random variables, :class:`int`" self.n_samples = None @@ -95,7 +85,7 @@ def run( self, n_samples: PositiveInteger = 1_000, estimate_sobol_indices: bool = False, - num_bootstrap_samples: PositiveInteger = None, + n_bootstrap_samples: PositiveInteger = None, confidence_level: PositiveFloat = 0.95, ): """ @@ -107,7 +97,7 @@ def run( :param estimate_sobol_indices: If :code:`True`, the Sobol indices are estimated \ using the pick-and-freeze samples. - :param num_bootstrap_samples: Number of bootstrap samples used to estimate the \ + :param n_bootstrap_samples: Number of bootstrap samples used to estimate the \ Sobol indices. 
Default is :any:`None`. :param confidence_level: Confidence level used to compute the confidence \ @@ -126,13 +116,11 @@ def run( raise TypeError("UQpy: nsamples should be an integer") # Check num_bootstrap_samples data type - if num_bootstrap_samples is not None: - if not isinstance(num_bootstrap_samples, int): - raise TypeError("UQpy: num_bootstrap_samples should be an integer.\n") - elif num_bootstrap_samples is None: + if n_bootstrap_samples is None: self.logger.info( - "UQpy: num_bootstrap_samples is set to None, confidence intervals will not be computed.\n" - ) + "UQpy: num_bootstrap_samples is set to None, confidence intervals will not be computed.\n") + elif not isinstance(n_bootstrap_samples, int): + raise TypeError("UQpy: num_bootstrap_samples should be an integer.\n") ################## GENERATE SAMPLES ################## @@ -140,7 +128,7 @@ def run( self.logger.info("UQpy: Generated samples successfully.\n") - self.num_vars = A_samples.shape[1] # number of variables + self.n_variables = A_samples.shape[1] # number of variables ################# MODEL EVALUATIONS #################### @@ -148,60 +136,44 @@ def run( self.logger.info("UQpy: Model evaluations completed.\n") - ######################### STORAGE ######################## - # Create dictionary to store the sensitivity indices - computed_indices = {} ################## COMPUTE CHATTERJEE INDICES ################## - self.chatterjee_i = self.compute_chatterjee_indices(A_samples, A_model_evals) + self.first_order_chatterjee_indices = self.compute_chatterjee_indices(A_samples, A_model_evals) self.logger.info("UQpy: Chatterjee indices computed successfully.\n") - # Store the indices in the dictionary - computed_indices["chatterjee_i"] = self.chatterjee_i ################## COMPUTE SOBOL INDICES ################## self.logger.info("UQpy: Computing First order Sobol indices ...\n") if estimate_sobol_indices: - f_C_i_model_evals = self.compute_rank_analog_of_f_C_i( - A_samples, A_model_evals - ) + f_C_i_model_evals = self.compute_rank_analog_of_f_C_i(A_samples, A_model_evals) - self.sobol_i = self.compute_Sobol_indices(A_model_evals, f_C_i_model_evals) + self.first_order_sobol_indices = self.compute_Sobol_indices(A_model_evals, f_C_i_model_evals) self.logger.info("UQpy: First order Sobol indices computed successfully.\n") - # Store the indices in the dictionary - computed_indices["sobol_i"] = self.sobol_i ################## CONFIDENCE INTERVALS #################### - if num_bootstrap_samples is not None: + if n_bootstrap_samples is not None: self.logger.info("UQpy: Computing confidence intervals ...\n") estimator_inputs = [A_samples, A_model_evals] - self.confidence_interval_chatterjee_i = self.bootstrapping( + self.confidence_interval_chatterjee = self.bootstrapping( self.compute_chatterjee_indices, estimator_inputs, - computed_indices["chatterjee_i"], - num_bootstrap_samples, + self.first_order_chatterjee_indices, + n_bootstrap_samples, confidence_level, ) - self.logger.info( - "UQpy: Confidence intervals for Chatterjee indices computed successfully.\n" - ) - - computed_indices[ - "confidence_interval_chatterjee_i" - ] = self.confidence_interval_chatterjee_i + self.logger.info("UQpy: Confidence intervals for Chatterjee indices computed successfully.\n") - return computed_indices @staticmethod @beartype @@ -430,9 +402,7 @@ def compute_Sobol_indices( n_outputs = 1 C_i_model_evals = C_i_model_evals.reshape((n_outputs, *_shape)) - first_order_sobol = compute_first_order_sobol( - A_model_evals, None, C_i_model_evals, 
scheme="Sobol1993" - ) + first_order_sobol = compute_first_order_sobol(A_model_evals, None, C_i_model_evals, scheme="Sobol1993") return first_order_sobol @@ -466,7 +436,7 @@ def compute_rank_analog_of_f_C_i( f_A = A_model_evals N = f_A.shape[0] - m = self.num_vars + m = self.n_variables A_i_model_evals = np.zeros((N, m)) diff --git a/src/UQpy/sensitivity/CramervonMises.py b/src/UQpy/sensitivity/CramerVonMisesSensitivity.py similarity index 74% rename from src/UQpy/sensitivity/CramervonMises.py rename to src/UQpy/sensitivity/CramerVonMisesSensitivity.py index 745557cd9..459b2f9e9 100644 --- a/src/UQpy/sensitivity/CramervonMises.py +++ b/src/UQpy/sensitivity/CramerVonMisesSensitivity.py @@ -22,8 +22,8 @@ from UQpy.sensitivity.baseclass.Sensitivity import Sensitivity from UQpy.sensitivity.baseclass.PickFreeze import generate_pick_freeze_samples -from UQpy.sensitivity.Sobol import compute_first_order as compute_first_order_sobol -from UQpy.sensitivity.Sobol import compute_total_order as compute_total_order_sobol +from UQpy.sensitivity.SobolSensitivity import compute_first_order as compute_first_order_sobol +from UQpy.sensitivity.SobolSensitivity import compute_total_order as compute_total_order_sobol from UQpy.utilities.UQpyLoggingFormatter import UQpyLoggingFormatter from UQpy.utilities.ValidationTypes import ( PositiveInteger, @@ -36,7 +36,7 @@ # TODO: Sampling strategies -class CramervonMises(Sensitivity): +class CramerVonMisesSensitivity(Sensitivity): """ Compute the CramĆ©r-von Mises indices. @@ -57,41 +57,33 @@ class CramervonMises(Sensitivity): """ def __init__( - self, runmodel_object, dist_object, random_state=None, **kwargs + self, runmodel_object, dist_object, random_state=None ) -> None: - super().__init__( - runmodel_object, dist_object, random_state=random_state, **kwargs - ) + super().__init__(runmodel_object, dist_object, random_state=random_state) # Create logger with the same name as the class self.logger = logging.getLogger(__name__) - self.logger.setLevel(logging.ERROR) - frmt = UQpyLoggingFormatter() - # create console handler with a higher log level - ch = logging.StreamHandler() - ch.setFormatter(frmt) + self.first_order_CramerVonMises_indices = None + "First order CramĆ©r-von Mises indices, :class:`numpy.ndarray` of shape :code:`(n_variables, 1)`" - # add the handler to the logger - self.logger.addHandler(ch) + self.confidence_interval_CramerVonMises = None + "Confidence intervals of the first order CramĆ©r-von Mises indices, :class:`numpy.ndarray` " \ + "of shape :code:`(n_variables, 2)`" - self.CVM_i = None - "First order CramĆ©r-von Mises indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`" + self.first_order_sobol_indices = None + "First order Sobol indices computed using the pick-and-freeze samples, :class:`numpy.ndarray` " \ + "of shape :code:`(n_variables, 1)`" - self.confidence_interval_CVM_i = None - "Confidence intervals of the first order CramĆ©r-von Mises indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 2)`" - - self.sobol_i = None - "First order Sobol indices computed using the pick-and-freeze samples, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`" - - self.sobol_total_i = None - "Total order Sobol indices computed using the pick-and-freeze samples, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)`" + self.total_order_sobol_indices = None + "Total order Sobol indices computed using the pick-and-freeze samples, :class:`numpy.ndarray` " \ + "of shape :code:`(n_variables, 1)`" self.n_samples = None "Number of samples used to 
compute the Cramér-von Mises indices, :class:`int`" - self.num_vars = None + self.n_variables = None "Number of input random variables, :class:`int`" @beartype @@ -136,19 +128,15 @@ def run( raise TypeError("UQpy: nsamples should be an integer") # Check num_bootstrap_samples data type - if num_bootstrap_samples is not None: - if not isinstance(num_bootstrap_samples, int): - raise TypeError("UQpy: num_bootstrap_samples should be an integer.\n") - elif num_bootstrap_samples is None: - self.logger.info( - "UQpy: num_bootstrap_samples is set to None, confidence intervals will not be computed.\n" - ) + if num_bootstrap_samples is None: + self.logger.info("UQpy: num_bootstrap_samples is set to None, confidence intervals will not be computed.\n") + elif not isinstance(num_bootstrap_samples, int): + raise TypeError("UQpy: num_bootstrap_samples should be an integer.\n") ################## GENERATE SAMPLES ################## A_samples, W_samples, C_i_generator, _ = generate_pick_freeze_samples( - self.dist_object, self.n_samples, self.random_state - ) + self.dist_object, self.n_samples, self.random_state) self.logger.info("UQpy: Generated samples using the pick-freeze scheme.\n") @@ -162,9 +150,9 @@ def run( self.logger.info("UQpy: Model evaluations W completed.\n") - self.num_vars = A_samples.shape[1] + self.n_variables = A_samples.shape[1] - C_i_model_evals = np.zeros((self.n_samples, self.num_vars)) + C_i_model_evals = np.zeros((self.n_samples, self.n_variables)) for i, C_i in enumerate(C_i_generator): C_i_model_evals[:, i] = self._run_model(C_i).ravel() @@ -173,25 +161,17 @@ def run( self.logger.info("UQpy: All model evaluations computed successfully.\n") - ######################### STORAGE ######################## - - # Create dictionary to store the sensitivity indices - computed_indices = {} - ################## COMPUTE CVM INDICES ################## # flag is used to disable computation of # CVM indices during testing if not disable_CVM_indices: # Compute the Cramér-von Mises indices - self.CVM_i = self.pick_and_freeze_estimator( - A_model_evals, W_model_evals, C_i_model_evals - ) + self.first_order_CramerVonMises_indices = self.pick_and_freeze_estimator( + A_model_evals, W_model_evals, C_i_model_evals) self.logger.info("UQpy: Cramér-von Mises indices computed successfully.\n") - # Store the indices in the dictionary - computed_indices["CVM_i"] = self.CVM_i ################# COMPUTE CONFIDENCE INTERVALS ################## @@ -205,22 +185,16 @@ def run( C_i_model_evals, ] - self.confidence_interval_CVM_i = self.bootstrapping( + self.confidence_interval_CramerVonMises = self.bootstrapping( self.pick_and_freeze_estimator, estimator_inputs, - computed_indices["CVM_i"], + self.first_order_CramerVonMises_indices, num_bootstrap_samples, confidence_level, ) - self.logger.info( - "UQpy: Confidence intervals for Cramér-von Mises indices computed successfully.\n" - ) + self.logger.info("UQpy: Confidence intervals for Cramér-von Mises indices computed successfully.\n") - # Store the indices in the dictionary - computed_indices[ - "confidence_interval_CVM_i" - ] = self.confidence_interval_CVM_i ################## COMPUTE SOBOL INDICES ################## @@ -236,23 +210,16 @@ def run( n_outputs = 1 C_i_model_evals = C_i_model_evals.reshape((n_outputs, *_shape)) - self.sobol_i = compute_first_order_sobol( - A_model_evals, W_model_evals, C_i_model_evals - ) + self.first_order_sobol_indices = compute_first_order_sobol( + A_model_evals, W_model_evals, C_i_model_evals) self.logger.info("UQpy: First order 
Sobol indices computed successfully.\n") - self.sobol_total_i = compute_total_order_sobol( - A_model_evals, W_model_evals, C_i_model_evals - ) + self.total_order_sobol_indices = compute_total_order_sobol( + A_model_evals, W_model_evals, C_i_model_evals) self.logger.info("UQpy: Total order Sobol indices computed successfully.\n") - # Store the indices in the dictionary - computed_indices["sobol_i"] = self.sobol_i - computed_indices["sobol_total_i"] = self.sobol_total_i - - return computed_indices @staticmethod @beartype @@ -329,7 +296,7 @@ def pick_and_freeze_estimator( # (This should however be faster for small `N`, e.g. N=10_000) N = self.n_samples - m = self.num_vars + m = self.n_variables # Model evaluations f_A = A_model_evals.ravel() @@ -337,7 +304,7 @@ def pick_and_freeze_estimator( f_C_i = C_i_model_evals # Store Cramér-von Mises indices - First_order_indices = np.zeros((m, 1)) + first_order_indices = np.zeros((m, 1)) # Compute Cramér-von Mises indices for i in range(m): @@ -355,6 +322,6 @@ sum_numerator += mean_product - mean_sum**2 sum_denominator += mean_sum - mean_sum**2 - First_order_indices[i] = sum_numerator / sum_denominator + first_order_indices[i] = sum_numerator / sum_denominator - return First_order_indices + return first_order_indices diff --git a/src/UQpy/sensitivity/GeneralisedSobol.py b/src/UQpy/sensitivity/GeneralisedSobolSensitivity.py similarity index 83% rename from src/UQpy/sensitivity/GeneralisedSobol.py rename to src/UQpy/sensitivity/GeneralisedSobolSensitivity.py index 2a976e004..0562235af 100644 --- a/src/UQpy/sensitivity/GeneralisedSobol.py +++ b/src/UQpy/sensitivity/GeneralisedSobolSensitivity.py @@ -40,7 +40,7 @@ ) -class GeneralisedSobol(Sensitivity): +class GeneralisedSobolSensitivity(Sensitivity): """ Compute the generalised Sobol indices for models with multiple outputs (vector-valued response) using the Pick-and-Freeze method. 
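A minimal usage sketch under the naming introduced in this patch (``runmodel_obj`` and ``dist_object`` stand for any valid :class:`RunModel` and :class:`JointIndependent` pair, as in the docs examples; the sample size is illustrative):

>>> from UQpy.sensitivity.GeneralisedSobolSensitivity import GeneralisedSobolSensitivity
>>> SA = GeneralisedSobolSensitivity(runmodel_obj, dist_object)
>>> SA.run(n_samples=20_000)
>>> SA.generalized_first_order_indices   # ndarray of shape (n_variables, 1)
>>> SA.generalized_total_order_indices   # ndarray of shape (n_variables, 1)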
@@ -67,26 +67,17 @@ def __init__( # Create logger with the same name as the class self.logger = logging.getLogger(__name__) - self.logger.setLevel(logging.ERROR) - frmt = UQpyLoggingFormatter() - # create console handler with a higher log level - ch = logging.StreamHandler() - ch.setFormatter(frmt) + self.generalized_first_order_indices = None + "Generalised first order Sobol indices, :class:`ndarray` of shape (n_variables, 1)" - # add the handler to the logger - self.logger.addHandler(ch) - - self.gen_sobol_i = None - "Generalised first order Sobol indices, :class:`ndarray` of shape (num_vars, 1)" - - self.gen_sobol_total_i = None - "Generalised total order Sobol indices, :class:`ndarray` of shape (num_vars, 1)" + self.generalized_total_order_indices = None + "Generalised total order Sobol indices, :class:`ndarray` of shape (n_variables, 1)" self.n_samples = None "Number of samples used to compute the sensitivity indices, :class:`int`" - self.num_vars = None + self.n_variables = None "Number of model input variables, :class:`int`" @beartype @@ -124,23 +115,19 @@ def run( raise TypeError("UQpy: n_samples should be an integer") # Check num_bootstrap_samples data type - if num_bootstrap_samples is not None: - if not isinstance(num_bootstrap_samples, int): - raise TypeError("UQpy: num_bootstrap_samples should be an integer.\n") - elif num_bootstrap_samples is None: - self.logger.info( - "UQpy: num_bootstrap_samples is set to None, confidence intervals will not be computed.\n" - ) + if num_bootstrap_samples is None: + self.logger.info("UQpy: num_bootstrap_samples is set to None, confidence intervals will not be computed.\n") + elif not isinstance(num_bootstrap_samples, int): + raise TypeError("UQpy: num_bootstrap_samples should be an integer.\n") ################## GENERATE SAMPLES ################## (A_samples, B_samples, C_i_generator, _,) = generate_pick_freeze_samples( - self.dist_object, self.n_samples, self.random_state - ) + self.dist_object, self.n_samples, self.random_state) self.logger.info("UQpy: Generated samples using the pick-freeze scheme.\n") - self.num_vars = A_samples.shape[1] # Number of variables + self.n_variables = A_samples.shape[1] # Number of variables ################# MODEL EVALUATIONS #################### @@ -167,7 +154,7 @@ def run( self.n_outputs = A_model_evals.shape[1] # shape: (n_outputs, n_samples, num_vars) - C_i_model_evals = np.zeros((self.n_outputs, self.n_samples, self.num_vars)) + C_i_model_evals = np.zeros((self.n_outputs, self.n_samples, self.n_variables)) for i, C_i in enumerate(C_i_generator): @@ -185,32 +172,18 @@ def run( self.logger.info("UQpy: All model evaluations computed successfully.\n") - ######################### STORAGE ######################## - - # Create dictionary to store the sensitivity indices - computed_indices = {} - ################## COMPUTE GENERALISED SOBOL INDICES ################## - self.gen_sobol_i = self.compute_first_order_generalised_sobol_indices( - A_model_evals, B_model_evals, C_i_model_evals - ) + self.generalized_first_order_indices = self.compute_first_order_generalised_sobol_indices( + A_model_evals, B_model_evals, C_i_model_evals) - self.logger.info( - "UQpy: First order Generalised Sobol indices computed successfully.\n" - ) + self.logger.info("UQpy: First order Generalised Sobol indices computed successfully.\n") - self.gen_sobol_total_i = self.compute_total_order_generalised_sobol_indices( - A_model_evals, B_model_evals, C_i_model_evals - ) + self.generalized_total_order_indices = 
self.compute_total_order_generalised_sobol_indices( + A_model_evals, B_model_evals, C_i_model_evals) - self.logger.info( - "UQpy: Total order Generalised Sobol indices computed successfully.\n" - ) + self.logger.info("UQpy: Total order Generalised Sobol indices computed successfully.\n") - # Store the indices in the dictionary - computed_indices["gen_sobol_i"] = self.gen_sobol_i - computed_indices["gen_sobol_total_i"] = self.gen_sobol_total_i ################## CONFIDENCE INTERVALS #################### @@ -228,37 +201,26 @@ def run( self.confidence_interval_gen_sobol_i = self.bootstrapping( self.compute_first_order_generalised_sobol_indices, estimator_inputs, - computed_indices["gen_sobol_i"], + self.generalized_first_order_indices, num_bootstrap_samples, confidence_level, ) self.logger.info( - "UQpy: Confidence intervals for First order Generalised Sobol indices computed successfully.\n" - ) + "UQpy: Confidence intervals for First order Generalised Sobol indices computed successfully.\n") # Total order generalised Sobol indices self.confidence_interval_gen_sobol_total_i = self.bootstrapping( self.compute_total_order_generalised_sobol_indices, estimator_inputs, - computed_indices["gen_sobol_total_i"], + self.generalized_total_order_indices, num_bootstrap_samples, confidence_level, ) self.logger.info( - "UQpy: Confidence intervals for Total order Sobol Generalised indices computed successfully.\n" - ) - - # Store the indices in the dictionary - computed_indices[ - "confidence_interval_gen_sobol_i" - ] = self.confidence_interval_gen_sobol_i - computed_indices[ - "confidence_interval_gen_sobol_total_i" - ] = self.confidence_interval_gen_sobol_total_i + "UQpy: Confidence intervals for Total order Sobol Generalised indices computed successfully.\n") - return computed_indices @staticmethod @beartype diff --git a/src/UQpy/sensitivity/Sobol.py b/src/UQpy/sensitivity/SobolSensitivity.py similarity index 94% rename from src/UQpy/sensitivity/Sobol.py rename to src/UQpy/sensitivity/SobolSensitivity.py index 84e9b3510..69ea455ee 100644 --- a/src/UQpy/sensitivity/Sobol.py +++ b/src/UQpy/sensitivity/SobolSensitivity.py @@ -68,7 +68,7 @@ # TODO: Sampling strategies -class Sobol(Sensitivity): +class SobolSensitivity(Sensitivity): """ Compute Sobol sensitivity indices using the pick and freeze algorithm. 
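A minimal usage sketch under the new name (``runmodel_obj`` and ``dist_object`` again stand for any valid model/distribution pair; sample sizes are illustrative):

>>> from UQpy.sensitivity.SobolSensitivity import SobolSensitivity
>>> SA = SobolSensitivity(runmodel_obj, dist_object)
>>> computed_indices = SA.run(n_samples=50_000, estimate_second_order=True)
>>> SA.first_order_indices    # ndarray of shape (n_variables, n_outputs)
>>> SA.total_order_indices    # ndarray of shape (n_variables, n_outputs)
>>> SA.second_order_indices   # ndarray of shape (num_second_order_terms, n_outputs)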
For models with multiple outputs @@ -91,49 +91,40 @@ class Sobol(Sensitivity): **Methods:** """ - def __init__( - self, runmodel_object, dist_object, random_state=None, **kwargs + def __init__(self, runmodel_object, dist_object, random_state=None ) -> None: - super().__init__(runmodel_object, dist_object, random_state, **kwargs) + super().__init__(runmodel_object, dist_object, random_state) # Create logger with the same name as the class self.logger = logging.getLogger(__name__) - self.logger.setLevel(logging.ERROR) - frmt = UQpyLoggingFormatter() - # create console handler with a higher log level - ch = logging.StreamHandler() - ch.setFormatter(frmt) + self.first_order_indices = None + "First order Sobol indices, :class:`numpy.ndarray` of shape `(n_variables, n_outputs)`" - # add the handler to the logger - self.logger.addHandler(ch) + self.total_order_indices = None + "Total order Sobol indices, :class:`numpy.ndarray` of shape `(n_variables, n_outputs)`" - self.sobol_i = None - "First order Sobol indices, :class:`numpy.ndarray` of shape `(num_vars, n_outputs)`" - - self.sobol_total_i = None - "Total order Sobol indices, :class:`numpy.ndarray` of shape `(num_vars, n_outputs)`" - - self.sobol_ij = None + self.second_order_indices = None "Second order Sobol indices, :class:`numpy.ndarray` of shape `(num_second_order_terms, n_outputs)`" - self.confidence_interval_sobol_i = None - "Confidence intervals for the first order Sobol indices, :class:`numpy.ndarray` of shape `(num_vars, 2)`" + self.first_order_confidence_interval = None + "Confidence intervals for the first order Sobol indices, :class:`numpy.ndarray` of shape `(n_variables, 2)`" - self.confidence_interval_sobol_total_i = None - "Confidence intervals for the total order Sobol indices, :class:`numpy.ndarray` of shape `(num_vars, 2)`" + self.total_order_confidence_interval = None + "Confidence intervals for the total order Sobol indices, :class:`numpy.ndarray` of shape `(n_variables, 2)`" - self.confidence_interval_sobol_ij = None - "Confidence intervals for the second order Sobol indices, :class:`numpy.ndarray` of shape `(num_second_order_terms, 2)`" + self.second_order_confidence_interval = None + "Confidence intervals for the second order Sobol indices, :class:`numpy.ndarray` of shape" \ + " `(num_second_order_terms, 2)`" self.n_samples = None "Number of samples used to compute the sensitivity indices, :class:`int`" - self.num_vars = None + self.n_variables = None "Number of model input variables, :class:`int`" - self.multioutput = None + self.is_multi_output = None "True if the model has multiple outputs, :class:`bool`" @beartype @@ -211,7 +202,7 @@ def run( self.logger.info("UQpy: Generated samples using the pick-freeze scheme.") - self.num_vars = A_samples.shape[1] # Number of variables + self.n_variables = A_samples.shape[1] # Number of variables ################# MODEL EVALUATIONS #################### @@ -230,13 +221,13 @@ def run( self.n_outputs = 1 # multioutput flag - self.multioutput = True if self.n_outputs > 1 else False + self.is_multi_output = True if self.n_outputs > 1 else False - if not self.multioutput: + if not self.is_multi_output: A_model_evals = A_model_evals.reshape(-1, 1) B_model_evals = B_model_evals.reshape(-1, 1) - C_i_model_evals = np.zeros((self.n_outputs, self.n_samples, self.num_vars)) + C_i_model_evals = np.zeros((self.n_outputs, self.n_samples, self.n_variables)) for i, C_i in enumerate(C_i_generator): C_i_model_evals[:, :, i] = self._run_model(C_i).T @@ -246,7 +237,7 @@ def run( # Compute 
D_i_model_evals only if needed if estimate_second_order or total_order_scheme == "Saltelli2002": - D_i_model_evals = np.zeros((self.n_outputs, self.n_samples, self.num_vars)) + D_i_model_evals = np.zeros((self.n_outputs, self.n_samples, self.n_variables)) for i, D_i in enumerate(D_i_generator): D_i_model_evals[:, :, i] = self._run_model(D_i).T @@ -266,7 +257,7 @@ def run( ################## COMPUTE SOBOL INDICES ################## # First order Sobol indices - self.sobol_i = compute_first_order( + self.first_order_indices = compute_first_order( A_model_evals, B_model_evals, C_i_model_evals, @@ -276,10 +267,10 @@ def run( self.logger.info("UQpy: First order Sobol indices computed successfully.") - computed_indices["sobol_i"] = self.sobol_i + computed_indices["sobol_i"] = self.first_order_indices # Total order Sobol indices - self.sobol_total_i = compute_total_order( + self.total_order_indices = compute_total_order( A_model_evals, B_model_evals, C_i_model_evals, @@ -289,12 +280,12 @@ def run( self.logger.info("UQpy: Total order Sobol indices computed successfully.") - computed_indices["sobol_total_i"] = self.sobol_total_i + computed_indices["sobol_total_i"] = self.total_order_indices if estimate_second_order: # Second order Sobol indices - self.sobol_ij = compute_second_order( + self.second_order_indices = compute_second_order( A_model_evals, B_model_evals, C_i_model_evals, @@ -305,7 +296,7 @@ def run( self.logger.info("UQpy: Second order Sobol indices computed successfully.") - computed_indices["sobol_ij"] = self.sobol_ij + computed_indices["sobol_ij"] = self.second_order_indices ################## CONFIDENCE INTERVALS #################### @@ -321,7 +312,7 @@ def run( ] # First order Sobol indices - self.confidence_interval_sobol_i = self.bootstrapping( + self.first_order_confidence_interval = self.bootstrapping( compute_first_order, estimator_inputs, computed_indices["sobol_i"], @@ -336,10 +327,10 @@ def run( computed_indices[ "confidence_interval_sobol_i" - ] = self.confidence_interval_sobol_i + ] = self.first_order_confidence_interval # Total order Sobol indices - self.confidence_interval_sobol_total_i = self.bootstrapping( + self.total_order_confidence_interval = self.bootstrapping( compute_total_order, estimator_inputs, computed_indices["sobol_total_i"], @@ -354,11 +345,11 @@ def run( computed_indices[ "confidence_interval_sobol_total_i" - ] = self.confidence_interval_sobol_total_i + ] = self.total_order_confidence_interval # Second order Sobol indices if estimate_second_order: - self.confidence_interval_sobol_ij = self.bootstrapping( + self.second_order_confidence_interval = self.bootstrapping( compute_second_order, estimator_inputs, computed_indices["sobol_ij"], @@ -374,7 +365,7 @@ def run( computed_indices[ "confidence_interval_sobol_ij" - ] = self.confidence_interval_sobol_ij + ] = self.second_order_confidence_interval return computed_indices diff --git a/src/UQpy/sensitivity/__init__.py b/src/UQpy/sensitivity/__init__.py index 2433a768b..3634611ef 100644 --- a/src/UQpy/sensitivity/__init__.py +++ b/src/UQpy/sensitivity/__init__.py @@ -1,13 +1,13 @@ from UQpy.sensitivity.MorrisSensitivity import MorrisSensitivity from UQpy.sensitivity.PceSensitivity import PceSensitivity -from UQpy.sensitivity.Sobol import Sobol -from UQpy.sensitivity.CramervonMises import CramervonMises -from UQpy.sensitivity.Chatterjee import Chatterjee -from UQpy.sensitivity.GeneralisedSobol import GeneralisedSobol +from UQpy.sensitivity.SobolSensitivity import SobolSensitivity +from 
UQpy.sensitivity.CramerVonMisesSensitivity import CramerVonMisesSensitivity +from UQpy.sensitivity.ChatterjeeSensitivity import ChatterjeeSensitivity +from UQpy.sensitivity.GeneralisedSobolSensitivity import GeneralisedSobolSensitivity from . import MorrisSensitivity from . import PceSensitivity -from . import Sobol -from . import CramervonMises -from . import Chatterjee -from . import GeneralisedSobol +from . import SobolSensitivity +from . import CramerVonMisesSensitivity +from . import ChatterjeeSensitivity +from . import GeneralisedSobolSensitivity diff --git a/src/UQpy/sensitivity/baseclass/Sensitivity.py b/src/UQpy/sensitivity/baseclass/Sensitivity.py index 971fc9e7d..8233a29e0 100644 --- a/src/UQpy/sensitivity/baseclass/Sensitivity.py +++ b/src/UQpy/sensitivity/baseclass/Sensitivity.py @@ -34,8 +34,7 @@ def __init__( self, runmodel_object: RunModel, dist_object: Union[JointIndependent, Union[list, tuple]], - random_state: RandomStateType = None, - **kwargs, + random_state: RandomStateType = None ) -> None: self.runmodel_object = runmodel_object @@ -238,38 +237,17 @@ def bootstrapping( # store the confidence interval for each qoi confidence_interval_qoi = np.zeros((n_outputs, n_qois, 2)) - ##################### CREATE GENERATORS ##################### - - for i, input in enumerate(estimator_inputs): - - if isinstance(input, np.ndarray): - - # Example: f_A or f_B of models with single output. - # Shape: `(n_samples, 1)`. - if input.ndim == 2 and input.shape[1] == 1: - input_generators.append(self.bootstrap_sample_generator_1D(input)) - - # Example: f_C_i or f_D_i of models with single output. - # Shape: `(n_samples, num_vars)`. - elif input.ndim == 2 and input.shape[1] > 1: - input_generators.append(self.bootstrap_sample_generator_2D(input)) - - # Example: f_C_i or f_D_i of models with multiple outputs. - # Shape: `(n_outputs, n_samples, num_vars)`. - elif input.ndim == 3: - input_generators.append(self.bootstrap_sample_generator_3D(input)) + self._create_generators(estimator_inputs, input_generators) - # Example: if models evals is None. 
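# For intuition, the generator-based resampling refactored below boils down to
# the following self-contained sketch (an illustration only, not part of the
# class; `qoi_fn` and `f_A` are hypothetical names):

import numpy as np
import scipy.stats

def bootstrap_ci(qoi_fn, f_A, n_bootstrap, confidence_level=0.95):
    # Resample rows of f_A with replacement, re-evaluate the estimator,
    # and turn the spread of the estimates into a normal-theory interval.
    n = f_A.shape[0]
    estimates = np.empty(n_bootstrap)
    for j in range(n_bootstrap):
        _indices = np.random.randint(0, high=n, size=n)
        estimates[j] = qoi_fn(f_A[_indices])
    delta = -scipy.stats.norm.ppf((1 - confidence_level) / 2)
    std = np.std(estimates, ddof=1)
    qoi = qoi_fn(f_A)
    return qoi - delta * std, qoi + delta * std

# e.g. bootstrap_ci(np.mean, f_A, n_bootstrap=500) returns a (lower, upper) pair.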
- elif input == None: - input_generators.append(input) + self._evaluate_bootstrap_sample_qoi(bootstrapped_qoi, estimator, input_generators, kwargs, num_bootstrap_samples) - else: - raise ValueError( - f"UQpy: estimator_inputs[{i}] should be either None or `ndarray` of dimension 1, 2 or 3" - ) + confidence_interval_qoi = self._calculate_confidence_intervals(bootstrapped_qoi, confidence_interval_qoi, + confidence_level, n_outputs, qoi_mean) - ################### BOOTSTRAPPING ################## + return confidence_interval_qoi + def _evaluate_bootstrap_sample_qoi(self, bootstrapped_qoi, estimator, input_generators, kwargs, + num_bootstrap_samples): # Compute the qoi for each bootstrap sample for j in range(num_bootstrap_samples): @@ -278,20 +256,18 @@ def bootstrapping( # generate samples for gen_input in input_generators: - if gen_input == None: + if gen_input is None: args.append(gen_input) else: args.append(gen_input.__next__()) bootstrapped_qoi[:, :, j] = estimator(*args, **kwargs).T - ################# CONFIDENCE INTERVAL ################ - + def _calculate_confidence_intervals(self, bootstrapped_qoi, confidence_interval_qoi, confidence_level, n_outputs, + qoi_mean): # Calculate confidence intervals delta = -scipy.stats.norm.ppf((1 - confidence_level) / 2) - for output_j in range(n_outputs): - # estimate the standard deviation using the bootstrap indices std_qoi = np.std(bootstrapped_qoi[output_j, :, :], axis=1, ddof=1) @@ -300,9 +276,34 @@ def bootstrapping( confidence_interval_qoi[output_j, :, 0] = lower_bound confidence_interval_qoi[output_j, :, 1] = upper_bound - # For models with single output, return 2D array. if n_outputs == 1: confidence_interval_qoi = confidence_interval_qoi[0, :, :] - return confidence_interval_qoi + + def _create_generators(self, estimator_inputs, input_generators): + for i, input in enumerate(estimator_inputs): + + if isinstance(input, np.ndarray): + + # Example: f_A or f_B of models with single output. + # Shape: `(n_samples, 1)`. + if input.ndim == 2 and input.shape[1] == 1: + input_generators.append(self.bootstrap_sample_generator_1D(input)) + + # Example: f_C_i or f_D_i of models with single output. + # Shape: `(n_samples, num_vars)`. + elif input.ndim == 2 and input.shape[1] > 1: + input_generators.append(self.bootstrap_sample_generator_2D(input)) + + # Example: f_C_i or f_D_i of models with multiple outputs. + # Shape: `(n_outputs, n_samples, num_vars)`. 
+ elif input.ndim == 3: + input_generators.append(self.bootstrap_sample_generator_3D(input)) + + elif input is None: + input_generators.append(input) + + else: + raise ValueError(f"UQpy: estimator_inputs[{i}] should be either " + f"None or `ndarray` of dimension 1, 2 or 3") diff --git a/tests/unit_tests/sensitivity/test_baseclass.py b/tests/unit_tests/sensitivity/test_baseclass.py index 724abb298..bc32aaa16 100644 --- a/tests/unit_tests/sensitivity/test_baseclass.py +++ b/tests/unit_tests/sensitivity/test_baseclass.py @@ -17,7 +17,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.Sobol import Sobol +from UQpy.sensitivity.SobolSensitivity import SobolSensitivity from UQpy.sensitivity.baseclass.PickFreeze import generate_pick_freeze_samples # Prepare @@ -57,7 +57,7 @@ def ishigami_model_object(): def sobol_object(ishigami_model_object, ishigami_input_dist_object): """This function returns the Sobol object.""" - return Sobol(ishigami_model_object, ishigami_input_dist_object) + return SobolSensitivity(ishigami_model_object, ishigami_input_dist_object) @pytest.fixture() @@ -211,7 +211,7 @@ def test_bootstrap_for_vector(random_f_A, manual_bootstrap_samples_f_A): # Prepare np.random.seed(12345) #! set seed for reproducibility - gen_f_A = Sobol.bootstrap_sample_generator_1D(random_f_A) + gen_f_A = SobolSensitivity.bootstrap_sample_generator_1D(random_f_A) bootstrap_samples_f_A = next(gen_f_A) @@ -226,7 +226,7 @@ def test_bootstrap_for_matrix(random_f_C_i, manual_bootstrap_samples_f_C_i): # Prepare np.random.seed(12345) #! set seed for reproducibility - gen_f_C_i = Sobol.bootstrap_sample_generator_2D(random_f_C_i) + gen_f_C_i = SobolSensitivity.bootstrap_sample_generator_2D(random_f_C_i) bootstrap_samples_C_i = next(gen_f_C_i) diff --git a/tests/unit_tests/sensitivity/test_chatterjee.py b/tests/unit_tests/sensitivity/test_chatterjee.py index 5912cdb2c..756b4922f 100644 --- a/tests/unit_tests/sensitivity/test_chatterjee.py +++ b/tests/unit_tests/sensitivity/test_chatterjee.py @@ -49,7 +49,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform, Normal from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.Chatterjee import Chatterjee +from UQpy.sensitivity.ChatterjeeSensitivity import ChatterjeeSensitivity # Prepare @@ -89,7 +89,7 @@ def exponential_model_object(): @pytest.fixture() def Chatterjee_object(exponential_model_object, exponential_input_dist_object): """This function creates the Chatterjee object""" - return Chatterjee(exponential_model_object, exponential_input_dist_object) + return ChatterjeeSensitivity(exponential_model_object, exponential_input_dist_object) @pytest.fixture() @@ -152,7 +152,7 @@ def ishigami_model_object(): @pytest.fixture() def Chatterjee_object_ishigami(ishigami_model_object, ishigami_input_dist_object): """This function creates the Chatterjee object""" - return Chatterjee(ishigami_model_object, ishigami_input_dist_object) + return ChatterjeeSensitivity(ishigami_model_object, ishigami_input_dist_object) @pytest.fixture() diff --git a/tests/unit_tests/sensitivity/test_cramer_von_mises.py b/tests/unit_tests/sensitivity/test_cramer_von_mises.py index ed9a55b24..72bf831b3 100644 --- a/tests/unit_tests/sensitivity/test_cramer_von_mises.py +++ b/tests/unit_tests/sensitivity/test_cramer_von_mises.py @@ -47,7 +47,7 @@ 
from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Normal, Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.CramervonMises import CramervonMises +from UQpy.sensitivity.CramerVonMisesSensitivity import CramerVonMisesSensitivity # Prepare ############################################################################### @@ -84,7 +84,7 @@ def exponential_model_object(): def CVM_object(exponential_model_object, exponential_input_dist_object): """This function returns the CVM object.""" - return CramervonMises(exponential_model_object, exponential_input_dist_object) + return CramerVonMisesSensitivity(exponential_model_object, exponential_input_dist_object) @pytest.fixture() @@ -230,7 +230,7 @@ def ishigami_model_object(): def CVM_object_ishigami(ishigami_model_object, ishigami_input_dist_object): """This function returns the CVM object.""" - return CramervonMises(ishigami_model_object, ishigami_input_dist_object) + return CramerVonMisesSensitivity(ishigami_model_object, ishigami_input_dist_object) @pytest.fixture() diff --git a/tests/unit_tests/sensitivity/test_generalised_sobol.py b/tests/unit_tests/sensitivity/test_generalised_sobol.py index 0b1f0919c..f1f1a5f7a 100644 --- a/tests/unit_tests/sensitivity/test_generalised_sobol.py +++ b/tests/unit_tests/sensitivity/test_generalised_sobol.py @@ -50,7 +50,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform, Normal from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.GeneralisedSobol import GeneralisedSobol +from UQpy.sensitivity.GeneralisedSobolSensitivity import GeneralisedSobolSensitivity # Prepare ############################################################################### @@ -109,7 +109,7 @@ def generalised_sobol_object_normal(normal_input_dist_object, toy_model_object): """ - return GeneralisedSobol(toy_model_object, normal_input_dist_object) + return GeneralisedSobolSensitivity(toy_model_object, normal_input_dist_object) @pytest.fixture() @@ -120,7 +120,7 @@ def generalised_sobol_object_uniform(uniform_input_dist_object, toy_model_object """ - return GeneralisedSobol(toy_model_object, uniform_input_dist_object) + return GeneralisedSobolSensitivity(toy_model_object, uniform_input_dist_object) @pytest.fixture() diff --git a/tests/unit_tests/sensitivity/test_sobol.py b/tests/unit_tests/sensitivity/test_sobol.py index ff26801da..db1b94062 100644 --- a/tests/unit_tests/sensitivity/test_sobol.py +++ b/tests/unit_tests/sensitivity/test_sobol.py @@ -55,7 +55,7 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.distributions import Uniform from UQpy.distributions.collection.JointIndependent import JointIndependent -from UQpy.sensitivity.Sobol import Sobol +from UQpy.sensitivity.SobolSensitivity import SobolSensitivity # Prepare ############################################################################### @@ -94,7 +94,7 @@ def ishigami_model_object(): def sobol_object(ishigami_model_object, ishigami_input_dist_object): """This function returns the Sobol object.""" - return Sobol(ishigami_model_object, ishigami_input_dist_object) + return SobolSensitivity(ishigami_model_object, ishigami_input_dist_object) @pytest.fixture() @@ -177,7 +177,7 @@ def bootstrap_sobol_index_variance(sobol_object, NUM_SAMPLES): #### Compute indices #### computed_indices = SA.run( n_samples=n_samples, - 
num_bootstrap_samples=num_bootstrap_samples, + n_bootstrap_samples=num_bootstrap_samples, confidence_level=confidence_level, ) @@ -291,7 +291,7 @@ def sobol_object_g_func( ): """This function creates the Sobol object for the g-function""" - sobol_object = Sobol( + sobol_object = SobolSensitivity( sobol_g_function_model_object, sobol_g_function_input_dist_object ) From 0b0ee2f75f0197c67db2d265dfaa0cbc2d4c6aca Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Tue, 9 Aug 2022 14:24:12 +0300 Subject: [PATCH 67/88] Fixes failing tests after changing naming conventions --- .../plot_generalised_sobol_multioutput.py | 4 ++-- .../sensitivity/sobol/plot_sobol_ishigami.py | 2 +- .../GeneralisedSobolSensitivity.py | 18 +++++++-------- src/UQpy/sensitivity/SobolSensitivity.py | 18 +++++++-------- .../unit_tests/sensitivity/test_chatterjee.py | 8 +++---- .../sensitivity/test_cramer_von_mises.py | 14 ++++++------ .../sensitivity/test_generalised_sobol.py | 22 +++++++++---------- tests/unit_tests/sensitivity/test_sobol.py | 12 +++++----- 8 files changed, 47 insertions(+), 51 deletions(-) diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py index 039817373..f4347d84a 100644 --- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py +++ b/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py @@ -55,7 +55,7 @@ SA = GeneralisedSobolSensitivity(runmodel_obj, dist_object_1) computed_indices = SA.run( - n_samples=20_000, confidence_level=0.95, num_bootstrap_samples=5_00 + n_samples=20_000, confidence_level=0.95, n_bootstrap_samples=5_00 ) # %% [markdown] @@ -103,7 +103,7 @@ SA = GeneralisedSobolSensitivity(runmodel_obj, dist_object_2) computed_indices = SA.run( - n_samples=20_000, confidence_level=0.95, num_bootstrap_samples=5_00 + n_samples=20_000, confidence_level=0.95, n_bootstrap_samples=5_00 ) # %% [markdown] diff --git a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py index dbedfde4c..664bbc0b9 100644 --- a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py +++ b/docs/code/sensitivity/sobol/plot_sobol_ishigami.py @@ -74,7 +74,7 @@ # %% SA = SobolSensitivity(runmodel_obj, dist_object) -computed_indices = SA.run(n_samples=100_000, num_bootstrap_samples=100) +computed_indices = SA.run(n_samples=100_000, n_bootstrap_samples=100) # %% [markdown] # **First order Sobol indices** diff --git a/src/UQpy/sensitivity/GeneralisedSobolSensitivity.py b/src/UQpy/sensitivity/GeneralisedSobolSensitivity.py index 0562235af..338e8cf59 100644 --- a/src/UQpy/sensitivity/GeneralisedSobolSensitivity.py +++ b/src/UQpy/sensitivity/GeneralisedSobolSensitivity.py @@ -84,7 +84,7 @@ def __init__( def run( self, n_samples: PositiveInteger = 1_000, - num_bootstrap_samples: PositiveInteger = None, + n_bootstrap_samples: PositiveInteger = None, confidence_level: PositiveFloat = 0.95, ): @@ -95,7 +95,7 @@ def run( :param n_samples: Number of samples used to compute the sensitivity indices. \ Default is 1,000. - :param num_bootstrap_samples: Number of bootstrap samples used to compute the \ + :param n_bootstrap_samples: Number of bootstrap samples used to compute the \ confidence intervals. Default is :any:`None`. 
:param confidence_level: Confidence level used to compute the confidence \ @@ -115,10 +115,10 @@ def run( raise TypeError("UQpy: n_samples should be an integer") # Check num_bootstrap_samples data type - if num_bootstrap_samples is None: + if n_bootstrap_samples is None: self.logger.info("UQpy: num_bootstrap_samples is set to None, confidence intervals will not be computed.\n") - elif not isinstance(num_bootstrap_samples, int): + elif not isinstance(n_bootstrap_samples, int): raise TypeError("UQpy: num_bootstrap_samples should be an integer.\n") ################## GENERATE SAMPLES ################## @@ -187,7 +187,7 @@ def run( ################## CONFIDENCE INTERVALS #################### - if num_bootstrap_samples is not None: + if n_bootstrap_samples is not None: self.logger.info("UQpy: Computing confidence intervals ...\n") @@ -198,11 +198,11 @@ def run( ] # First order generalised Sobol indices - self.confidence_interval_gen_sobol_i = self.bootstrapping( + self.first_order_confidence_interval = self.bootstrapping( self.compute_first_order_generalised_sobol_indices, estimator_inputs, self.generalized_first_order_indices, - num_bootstrap_samples, + n_bootstrap_samples, confidence_level, ) @@ -210,11 +210,11 @@ def run( "UQpy: Confidence intervals for First order Generalised Sobol indices computed successfully.\n") # Total order generalised Sobol indices - self.confidence_interval_gen_sobol_total_i = self.bootstrapping( + self.total_order_confidence_interval = self.bootstrapping( self.compute_total_order_generalised_sobol_indices, estimator_inputs, self.generalized_total_order_indices, - num_bootstrap_samples, + n_bootstrap_samples, confidence_level, ) diff --git a/src/UQpy/sensitivity/SobolSensitivity.py b/src/UQpy/sensitivity/SobolSensitivity.py index 69ea455ee..967f8f968 100644 --- a/src/UQpy/sensitivity/SobolSensitivity.py +++ b/src/UQpy/sensitivity/SobolSensitivity.py @@ -131,7 +131,7 @@ def __init__(self, runmodel_object, dist_object, random_state=None def run( self, n_samples: PositiveInteger = 1_000, - num_bootstrap_samples: PositiveInteger = None, + n_bootstrap_samples: PositiveInteger = None, confidence_level: PositiveFloat = 0.95, estimate_second_order: bool = False, first_order_scheme: str = "Janon2014", @@ -145,7 +145,7 @@ def run( :param n_samples: Number of samples used to compute the sensitivity indices. \ Default is 1,000. - :param num_bootstrap_samples: Number of bootstrap samples used to compute the \ + :param n_bootstrap_samples: Number of bootstrap samples used to compute the \ confidence intervals. Default is :any:`None`. :param confidence_interval: Confidence level used to compute the confidence \ @@ -181,10 +181,10 @@ def run( raise TypeError("UQpy: n_samples should be an integer.") # Check num_bootstrap_samples data type - if num_bootstrap_samples is not None: - if not isinstance(num_bootstrap_samples, int): + if n_bootstrap_samples is not None: + if not isinstance(n_bootstrap_samples, int): raise TypeError("UQpy: num_bootstrap_samples should be an integer.") - elif num_bootstrap_samples is None: + elif n_bootstrap_samples is None: self.logger.info( "UQpy: num_bootstrap_samples is set to None, confidence intervals will not be computed." 
) @@ -300,7 +300,7 @@ def run( ################## CONFIDENCE INTERVALS #################### - if num_bootstrap_samples is not None: + if n_bootstrap_samples is not None: self.logger.info("UQpy: Computing confidence intervals ...") @@ -316,7 +316,7 @@ def run( compute_first_order, estimator_inputs, computed_indices["sobol_i"], - num_bootstrap_samples, + n_bootstrap_samples, confidence_level, scheme=first_order_scheme, ) @@ -334,7 +334,7 @@ def run( compute_total_order, estimator_inputs, computed_indices["sobol_total_i"], - num_bootstrap_samples, + n_bootstrap_samples, confidence_level, scheme=total_order_scheme, ) @@ -353,7 +353,7 @@ def run( compute_second_order, estimator_inputs, computed_indices["sobol_ij"], - num_bootstrap_samples, + n_bootstrap_samples, confidence_level, first_order_sobol=computed_indices["sobol_i"], scheme=second_order_scheme, diff --git a/tests/unit_tests/sensitivity/test_chatterjee.py b/tests/unit_tests/sensitivity/test_chatterjee.py index 756b4922f..2a3bdb5fb 100644 --- a/tests/unit_tests/sensitivity/test_chatterjee.py +++ b/tests/unit_tests/sensitivity/test_chatterjee.py @@ -115,9 +115,9 @@ def numerical_Chatterjee_indices(Chatterjee_object): np.random.seed(12345) #! set seed for reproducibility - computed_indices = SA.run(n_samples=10_000) + SA.run(n_samples=10_000) - return computed_indices["chatterjee_i"] + return SA.first_order_chatterjee_indices @pytest.fixture() @@ -163,9 +163,9 @@ def numerical_Sobol_indices(Chatterjee_object_ishigami): np.random.seed(12345) - computed_indices = SA.run(n_samples=10_000, estimate_sobol_indices=True) + SA.run(n_samples=10_000, estimate_sobol_indices=True) - return computed_indices["sobol_i"] + return SA.first_order_sobol_indices @pytest.fixture() diff --git a/tests/unit_tests/sensitivity/test_cramer_von_mises.py b/tests/unit_tests/sensitivity/test_cramer_von_mises.py index 72bf831b3..9c8571bf3 100644 --- a/tests/unit_tests/sensitivity/test_cramer_von_mises.py +++ b/tests/unit_tests/sensitivity/test_cramer_von_mises.py @@ -114,9 +114,9 @@ def numerical_exponential_CVM_indices(CVM_object): np.random.seed(12345) #! 
set seed for reproducibility - computed_indices = SA.run(n_samples=50_000) + SA.run(n_samples=50_000) - return computed_indices["CVM_i"] + return SA.first_order_CramerVonMises_indices @pytest.fixture() @@ -145,14 +145,14 @@ def bootstrap_CVM_index_variance(CVM_object, NUM_SAMPLES): num_bootstrap_samples, n_samples = NUM_SAMPLES #### Compute indices #### - computed_indices = SA.run( + SA.run( n_samples=n_samples, num_bootstrap_samples=num_bootstrap_samples, confidence_level=confidence_level, ) - First_order = computed_indices["CVM_i"].ravel() - upper_bound_first_order = computed_indices["confidence_interval_CVM_i"][:, 1] + First_order = SA.first_order_CramerVonMises_indices.ravel() + upper_bound_first_order = SA.confidence_interval_CramerVonMises[:, 1] #### Compute variance #### std_bootstrap_first_order = (upper_bound_first_order - First_order) / delta @@ -244,11 +244,11 @@ def numerical_Sobol_indices(CVM_object_ishigami): np.random.seed(12345) - computed_indices = SA.run( + SA.run( n_samples=500_000, estimate_sobol_indices=True, disable_CVM_indices=True ) - return computed_indices["sobol_i"], computed_indices["sobol_total_i"] + return SA.first_order_sobol_indices, SA.total_order_sobol_indices @pytest.fixture() diff --git a/tests/unit_tests/sensitivity/test_generalised_sobol.py b/tests/unit_tests/sensitivity/test_generalised_sobol.py index f1f1a5f7a..a5e3e22e7 100644 --- a/tests/unit_tests/sensitivity/test_generalised_sobol.py +++ b/tests/unit_tests/sensitivity/test_generalised_sobol.py @@ -154,9 +154,9 @@ def pick_and_freeze_toy_GSI_normal(generalised_sobol_object_normal): np.random.seed(12345) #! set seed for reproducibility - computed_indices = SA.run(n_samples=100_000) + SA.run(n_samples=100_000) - return computed_indices["gen_sobol_i"] + return SA.generalized_first_order_indices @pytest.fixture() @@ -170,9 +170,9 @@ def pick_and_freeze_toy_GSI_uniform(generalised_sobol_object_uniform): np.random.seed(12345) #! 
set seed for reproducibility - computed_indices = SA.run(n_samples=100_000) + SA.run(n_samples=100_000) - return computed_indices["gen_sobol_i"] + return SA.generalized_first_order_indices @pytest.fixture() @@ -201,18 +201,16 @@ def bootstrap_generalised_sobol_index_variance( # Compute the confidence intervals - computed_indices = SA.run( + SA.run( n_samples=n_samples, - num_bootstrap_samples=num_bootstrap_samples, + n_bootstrap_samples=num_bootstrap_samples, confidence_level=confidence_level, ) - gen_sobol_i = computed_indices["gen_sobol_i"].ravel() - gen_sobol_total_i = computed_indices["gen_sobol_total_i"].ravel() - upper_bound_first_order = computed_indices["confidence_interval_gen_sobol_i"][:, 1] - upper_bound_total_order = computed_indices["confidence_interval_gen_sobol_total_i"][ - :, 1 - ] + gen_sobol_i = SA.generalized_first_order_indices.ravel() + gen_sobol_total_i = SA.generalized_total_order_indices.ravel() + upper_bound_first_order = SA.first_order_confidence_interval[:, 1] + upper_bound_total_order = SA.total_order_confidence_interval[:, 1] std_bootstrap_first_order = (upper_bound_first_order - gen_sobol_i) / delta std_bootstrap_total_order = (upper_bound_total_order - gen_sobol_total_i) / delta diff --git a/tests/unit_tests/sensitivity/test_sobol.py b/tests/unit_tests/sensitivity/test_sobol.py index db1b94062..d9164e219 100644 --- a/tests/unit_tests/sensitivity/test_sobol.py +++ b/tests/unit_tests/sensitivity/test_sobol.py @@ -175,18 +175,16 @@ def bootstrap_sobol_index_variance(sobol_object, NUM_SAMPLES): num_bootstrap_samples, n_samples = NUM_SAMPLES #### Compute indices #### - computed_indices = SA.run( + SA.run( n_samples=n_samples, n_bootstrap_samples=num_bootstrap_samples, confidence_level=confidence_level, ) - First_order = computed_indices["sobol_i"].ravel() - Total_order = computed_indices["sobol_total_i"].ravel() - confidence_interval_first_order = computed_indices["confidence_interval_sobol_i"] - confidence_interval_total_order = computed_indices[ - "confidence_interval_sobol_total_i" - ] + First_order = SA.first_order_indices.ravel() + Total_order = SA.total_order_indices.ravel() + confidence_interval_first_order = SA.first_order_confidence_interval + confidence_interval_total_order = SA.total_order_confidence_interval #### Compute variance #### upper_bound_first_order = confidence_interval_first_order[:, 1] From 7f3a9dc33d5cc16b9abe439fffea8c7fa31658b1 Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Wed, 10 Aug 2022 11:06:08 +0300 Subject: [PATCH 68/88] Fixes failing examples due to changed naming conventions --- azure-pipelines.yml | 24 ++++++------- ...ponential.py => chatterjee_exponential.py} | 6 ++-- ...jee_ishigami.py => chatterjee_ishigami.py} | 16 ++++----- ...sobol_func.py => chatterjee_sobol_func.py} | 18 +++++----- .../{plot_additive.py => additive.py} | 18 +++++----- .../{plot_ishigami.py => ishigami.py} | 20 +++++------ ..._cvm_exponential.py => cvm_exponential.py} | 16 ++++----- ...ot_cvm_sobol_func.py => cvm_sobol_func.py} | 10 +++--- ...alised_sobol_mechanical_oscillator_ODE.py} | 12 +++---- ...ut.py => generalised_sobol_multioutput.py} | 36 +++++++++---------- ...or_ODE.py => mechanical_oscillator_ODE.py} | 20 +++++------ ...ot_sobol_additive.py => sobol_additive.py} | 8 ++--- .../{plot_sobol_func.py => sobol_func.py} | 16 ++++----- ...ot_sobol_ishigami.py => sobol_ishigami.py} | 22 ++++++------ src/UQpy/sensitivity/SobolSensitivity.py | 32 +++-------------- 15 files changed, 125 insertions(+), 149 deletions(-) rename 
docs/code/sensitivity/chatterjee/{plot_chatterjee_exponential.py => chatterjee_exponential.py} (94%) rename docs/code/sensitivity/chatterjee/{plot_chatterjee_ishigami.py => chatterjee_ishigami.py} (85%) rename docs/code/sensitivity/chatterjee/{plot_chatterjee_sobol_func.py => chatterjee_sobol_func.py} (91%) rename docs/code/sensitivity/comparison/{plot_additive.py => additive.py} (88%) rename docs/code/sensitivity/comparison/{plot_ishigami.py => ishigami.py} (86%) rename docs/code/sensitivity/cramer_von_mises/{plot_cvm_exponential.py => cvm_exponential.py} (88%) rename docs/code/sensitivity/cramer_von_mises/{plot_cvm_sobol_func.py => cvm_sobol_func.py} (91%) rename docs/code/sensitivity/generalised_sobol/{plot_generalised_sobol_mechanical_oscillator_ODE.py => generalised_sobol_mechanical_oscillator_ODE.py} (91%) rename docs/code/sensitivity/generalised_sobol/{plot_generalised_sobol_multioutput.py => generalised_sobol_multioutput.py} (76%) rename docs/code/sensitivity/sobol/{plot_mechanical_oscillator_ODE.py => mechanical_oscillator_ODE.py} (78%) rename docs/code/sensitivity/sobol/{plot_sobol_additive.py => sobol_additive.py} (92%) rename docs/code/sensitivity/sobol/{plot_sobol_func.py => sobol_func.py} (91%) rename docs/code/sensitivity/sobol/{plot_sobol_ishigami.py => sobol_ishigami.py} (84%) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index a2091c51a..de36617a7 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -152,18 +152,18 @@ jobs: displayName: Install Anaconda packages condition: eq(variables['Build.SourceBranch'], 'refs/heads/master') - - bash: | - source activate myEnvironment - conda build . recipe --variants "{'version': ['$(GitVersion.SemVer)']}" - displayName: Build Noarch conda packages - condition: eq(variables['Build.SourceBranch'], 'refs/heads/master') - - - bash: | - source activate myEnvironment - anaconda login --username $(ANACONDAUSER) --password $(ANACONDAPW) - anaconda upload /usr/local/miniconda/envs/myEnvironment/conda-bld/noarch/*.tar.bz2 - displayName: Upload conda packages - condition: eq(variables['Build.SourceBranch'], 'refs/heads/master') +# - bash: | +# source activate myEnvironment +# conda build . 
recipe --variants "{'version': ['$(GitVersion.SemVer)']}" +# displayName: Build Noarch conda packages +# condition: eq(variables['Build.SourceBranch'], 'refs/heads/master') +# +# - bash: | +# source activate myEnvironment +# anaconda login --username $(ANACONDAUSER) --password $(ANACONDAPW) +# anaconda upload /usr/local/miniconda/envs/myEnvironment/conda-bld/noarch/*.tar.bz2 +# displayName: Upload conda packages +# condition: eq(variables['Build.SourceBranch'], 'refs/heads/master') - job: "Create_Docker_images" dependsOn: Build_UQpy_and_run_tests diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py b/docs/code/sensitivity/chatterjee/chatterjee_exponential.py similarity index 94% rename from docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py rename to docs/code/sensitivity/chatterjee/chatterjee_exponential.py index 04cf4cf03..d824a885e 100644 --- a/docs/code/sensitivity/chatterjee/plot_chatterjee_exponential.py +++ b/docs/code/sensitivity/chatterjee/chatterjee_exponential.py @@ -52,7 +52,7 @@ SA = ChatterjeeSensitivity(runmodel_obj, dist_object) # Compute Chatterjee indices using the pick and freeze algorithm -computed_indices = SA.run(n_samples=1_000_000) +SA.run(n_samples=1_000_000) # %% [markdown] # **Chattererjee indices** @@ -66,11 +66,11 @@ # :math:`S^2_{CVM} = \frac{6}{\pi} \operatorname{arctan}(\sqrt{19}) - 2 \approx 0.5693` # %% -computed_indices["chatterjee_i"] +SA.first_order_chatterjee_indices # **Plot the Chatterjee indices** fig1, ax1 = plot_sensitivity_index( - computed_indices["chatterjee_i"][:, 0], + SA.first_order_chatterjee_indices[:, 0], plot_title="Chatterjee indices", color="C2", ) diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py b/docs/code/sensitivity/chatterjee/chatterjee_ishigami.py similarity index 85% rename from docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py rename to docs/code/sensitivity/chatterjee/chatterjee_ishigami.py index 696a07fee..16807e1ec 100644 --- a/docs/code/sensitivity/chatterjee/plot_chatterjee_ishigami.py +++ b/docs/code/sensitivity/chatterjee/chatterjee_ishigami.py @@ -4,7 +4,7 @@ ============================================== The ishigami function is a non-linear, non-monotonic function that is commonly used to -benchmark uncertainty and senstivity analysis methods. +benchmark uncertainty and sensitivity analysis methods. .. 
math:: f(x_1, x_2, x_3) = sin(x_1) + a \cdot sin^2(x_2) + b \cdot x_3^4 sin(x_1) @@ -49,7 +49,7 @@ # %% [markdown] SA = ChatterjeeSensitivity(runmodel_obj, dist_object) -computed_indices = SA.run( +SA.run( n_samples=100_000, estimate_sobol_indices=True, n_bootstrap_samples=100, @@ -60,18 +60,18 @@ # **Chattererjee indices** # %% -computed_indices["chatterjee_i"] +SA.first_order_chatterjee_indices # %% [markdown] # **Confidence intervals for the Chatterjee indices** # %% -computed_indices["confidence_interval_chatterjee_i"] +SA.confidence_interval_chatterjee # **Plot the Chatterjee indices** fig1, ax1 = plot_sensitivity_index( - computed_indices["chatterjee_i"][:, 0], - computed_indices["confidence_interval_chatterjee_i"], + SA.first_order_chatterjee_indices[:, 0], + SA.confidence_interval_chatterjee, plot_title="Chatterjee indices", color="C2", ) @@ -88,11 +88,11 @@ # :math:`S_3`: 0.0 # %% -computed_indices["sobol_i"] +SA.first_order_sobol_indices # **Plot the first order Sobol indices** fig2, ax2 = plot_sensitivity_index( - computed_indices["sobol_i"][:, 0], + SA.first_order_sobol_indices[:, 0], plot_title="First order Sobol indices", color="C0", ) diff --git a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py b/docs/code/sensitivity/chatterjee/chatterjee_sobol_func.py similarity index 91% rename from docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py rename to docs/code/sensitivity/chatterjee/chatterjee_sobol_func.py index 3d54ad38d..293b2b88a 100644 --- a/docs/code/sensitivity/chatterjee/plot_chatterjee_sobol_func.py +++ b/docs/code/sensitivity/chatterjee/chatterjee_sobol_func.py @@ -68,17 +68,17 @@ SA = ChatterjeeSensitivity(runmodel_obj, dist_object) # Compute Chatterjee indices using rank statistics -computed_indices = SA.run(n_samples=500_000, estimate_sobol_indices=True) +SA.run(n_samples=500_000, estimate_sobol_indices=True) # %% [markdown] # **Chatterjee indices** # %% -computed_indices["chatterjee_i"] +SA.first_order_chatterjee_indices # **Plot the Chatterjee indices** fig1, ax1 = plot_sensitivity_index( - computed_indices["chatterjee_i"][:, 0], + SA.first_order_chatterjee_indices[:, 0], plot_title="Chatterjee indices", color="C2", ) @@ -101,11 +101,11 @@ # :math:`S_6` = 0.03760626 # %% -computed_indices["sobol_i"] +SA.first_order_sobol_indices # **Plot the first order Sobol indices** fig2, ax2 = plot_sensitivity_index( - computed_indices["sobol_i"][:, 0], + SA.first_order_sobol_indices[:, 0], plot_title="First order Sobol indices", color="C0", ) @@ -143,12 +143,12 @@ for i, sample_size in enumerate(sample_sizes): # Estimate using rank statistics - _indices = SA_chatterjee.run(n_samples=sample_size*7, estimate_sobol_indices=True) - store_rank_stats[:, i] = _indices["sobol_i"].ravel() + SA_chatterjee.run(n_samples=sample_size*7, estimate_sobol_indices=True) + store_rank_stats[:, i] = SA_chatterjee.first_order_sobol_indices.ravel() # Estimate using Pick and Freeze approach - _indices = SA_sobol.run(n_samples=sample_size) - store_pick_freeze[:, i] = _indices["sobol_i"].ravel() + SA_sobol.run(n_samples=sample_size) + store_pick_freeze[:, i] = SA_sobol.first_order_indices.ravel() # %% diff --git a/docs/code/sensitivity/comparison/plot_additive.py b/docs/code/sensitivity/comparison/additive.py similarity index 88% rename from docs/code/sensitivity/comparison/plot_additive.py rename to docs/code/sensitivity/comparison/additive.py index 8dd101a1f..1a533edd1 100644 --- a/docs/code/sensitivity/comparison/plot_additive.py +++ 
b/docs/code/sensitivity/comparison/additive.py @@ -64,7 +64,7 @@ # %% [markdown] SA_sobol = SobolSensitivity(runmodel_obj, dist_object) -computed_indices_sobol = SA_sobol.run(n_samples=50_000) +SA_sobol.run(n_samples=50_000) # %% [markdown] # **First order Sobol indices** @@ -76,7 +76,7 @@ # :math:`\mathrm{S}_2 = \frac{b^2 \cdot \mathbb{V}[X_2]}{a^2 \cdot \mathbb{V}[X_1] + b^2 \cdot \mathbb{V}[X_2]} = \frac{2^2 \cdot 1}{1^2 \cdot 1 + 2^2 \cdot 1} = 0.8` # %% -computed_indices_sobol["sobol_i"] +SA_sobol.first_order_indices # %% [markdown] # **Compute Chatterjee indices** @@ -84,19 +84,19 @@ # %% [markdown] SA_chatterjee = ChatterjeeSensitivity(runmodel_obj, dist_object) -computed_indices_chatterjee = SA_chatterjee.run(n_samples=50_000) +SA_chatterjee.run(n_samples=50_000) # %% -computed_indices_chatterjee["chatterjee_i"] +SA_chatterjee.first_order_chatterjee_indices # %% SA_cvm = cvm(runmodel_obj, dist_object) # Compute CVM indices using the pick and freeze algorithm -computed_indices_cvm = SA_cvm.run(n_samples=20_000, estimate_sobol_indices=True) +SA_cvm.run(n_samples=20_000, estimate_sobol_indices=True) # %% -computed_indices_cvm["CVM_i"] +SA_cvm.first_order_CramerVonMises_indices # %% # **Plot all indices** @@ -106,9 +106,9 @@ variable_names = [r"$X_{}$".format(i + 1) for i in range(num_vars)] # round to 2 decimal places -indices_1 = np.around(computed_indices_sobol["sobol_i"][:, 0], decimals=2) -indices_2 = np.around(computed_indices_chatterjee["chatterjee_i"][:, 0], decimals=2) -indices_3 = np.around(computed_indices_cvm["CVM_i"][:, 0], decimals=2) +indices_1 = np.around(SA_sobol.first_order_indices[:, 0], decimals=2) +indices_2 = np.around(SA_chatterjee.first_order_chatterjee_indices[:, 0], decimals=2) +indices_3 = np.around(SA_cvm.first_order_CramerVonMises_indices[:, 0], decimals=2) fig, ax = plt.subplots() width = 0.3 diff --git a/docs/code/sensitivity/comparison/plot_ishigami.py b/docs/code/sensitivity/comparison/ishigami.py similarity index 86% rename from docs/code/sensitivity/comparison/plot_ishigami.py rename to docs/code/sensitivity/comparison/ishigami.py index 958cd5821..9d8d95a86 100644 --- a/docs/code/sensitivity/comparison/plot_ishigami.py +++ b/docs/code/sensitivity/comparison/ishigami.py @@ -52,7 +52,7 @@ # %% SA_sobol = SobolSensitivity(runmodel_obj, dist_object) -computed_indices_sobol = SA_sobol.run(n_samples=100_000) +SA_sobol.run(n_samples=100_000) # %% [markdown] # **First order Sobol indices** @@ -66,7 +66,7 @@ # :math:`S_3` = 0.0 # %% -computed_indices_sobol["sobol_i"] +SA_sobol.first_order_indices # %% [markdown] # **Total order Sobol indices** @@ -80,7 +80,7 @@ # :math:`S_{T_3}` = 0.24368366 # %% -computed_indices_sobol["sobol_total_i"] +SA_sobol.total_order_indices # %% [markdown] # **Compute Chatterjee indices** @@ -88,20 +88,20 @@ # %% [markdown] SA_chatterjee = ChatterjeeSensitivity(runmodel_obj, dist_object) -computed_indices_chatterjee = SA_chatterjee.run(n_samples=50_000) +SA_chatterjee.run(n_samples=50_000) # %% -computed_indices_chatterjee["chatterjee_i"] +SA_chatterjee.first_order_chatterjee_indices # %% [markdown] # **Compute CramĆ©r-von Mises indices** SA_cvm = cvm(runmodel_obj, dist_object) # Compute CVM indices using the pick and freeze algorithm -computed_indices_cvm = SA_cvm.run(n_samples=20_000, estimate_sobol_indices=True) +SA_cvm.run(n_samples=20_000, estimate_sobol_indices=True) # %% -computed_indices_cvm["CVM_i"] +SA_cvm.first_order_CramerVonMises_indices # %% # **Plot all indices** @@ -111,9 +111,9 @@ variable_names = [r"$X_{}$".format(i 
+ 1) for i in range(num_vars)] # round to 2 decimal places -indices_1 = np.around(computed_indices_sobol["sobol_i"][:, 0], decimals=2) -indices_2 = np.around(computed_indices_chatterjee["chatterjee_i"][:, 0], decimals=2) -indices_3 = np.around(computed_indices_cvm["CVM_i"][:, 0], decimals=2) +indices_1 = np.around(SA_sobol.first_order_indices[:, 0], decimals=2) +indices_2 = np.around(SA_chatterjee.first_order_chatterjee_indices[:, 0], decimals=2) +indices_3 = np.around(SA_cvm.first_order_CramerVonMises_indices[:, 0], decimals=2) fig, ax = plt.subplots() width = 0.3 diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py b/docs/code/sensitivity/cramer_von_mises/cvm_exponential.py similarity index 88% rename from docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py rename to docs/code/sensitivity/cramer_von_mises/cvm_exponential.py index 81b258bf1..8b1e2377a 100644 --- a/docs/code/sensitivity/cramer_von_mises/plot_cvm_exponential.py +++ b/docs/code/sensitivity/cramer_von_mises/cvm_exponential.py @@ -48,7 +48,7 @@ SA = cvm(runmodel_obj, dist_object) # Compute CVM indices using the pick and freeze algorithm -computed_indices = SA.run(n_samples=20_000, estimate_sobol_indices=True) +SA.run(n_samples=20_000, estimate_sobol_indices=True) # %% [markdown] # **CramĆ©r-von Mises indices** @@ -60,11 +60,11 @@ # :math:`S^2_{CVM} = \frac{6}{\pi} \operatorname{arctan}(\sqrt{19}) - 2 \approx 0.5693` # %% -computed_indices["CVM_i"] +SA.first_order_CramerVonMises_indices # **Plot the CVM indices** fig1, ax1 = plot_sensitivity_index( - computed_indices["CVM_i"][:, 0], + SA.first_order_CramerVonMises_indices[:, 0], plot_title="CramĆ©r-von Mises indices", color="C4", ) @@ -79,11 +79,11 @@ # :math:`S_2` = 0.3738 # %% -computed_indices["sobol_i"] +SA.first_order_sobol_indices # **Plot the first order Sobol indices** fig2, ax2 = plot_sensitivity_index( - computed_indices["sobol_i"][:, 0], + SA.first_order_sobol_indices[:, 0], plot_title="First order Sobol indices", color="C0", ) @@ -92,12 +92,12 @@ # **Estimated total order Sobol indices** # %% -computed_indices["sobol_total_i"] +SA.total_order_sobol_indices # **Plot the first and total order sensitivity indices** fig3, ax3 = plot_index_comparison( - computed_indices["sobol_i"][:, 0], - computed_indices["sobol_total_i"][:, 0], + SA.first_order_sobol_indices[:, 0], + SA.total_order_sobol_indices[:, 0], label_1="First order Sobol indices", label_2="Total order Sobol indices", plot_title="First and Total order Sobol indices", diff --git a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py b/docs/code/sensitivity/cramer_von_mises/cvm_sobol_func.py similarity index 91% rename from docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py rename to docs/code/sensitivity/cramer_von_mises/cvm_sobol_func.py index a69ab0e99..cf99040cc 100644 --- a/docs/code/sensitivity/cramer_von_mises/plot_cvm_sobol_func.py +++ b/docs/code/sensitivity/cramer_von_mises/cvm_sobol_func.py @@ -62,17 +62,17 @@ SA = cvm(runmodel_obj, dist_object) # Compute Sobol indices using rank statistics -computed_indices = SA.run(n_samples=50_000, estimate_sobol_indices=True) +SA.run(n_samples=50_000, estimate_sobol_indices=True) # %% [markdown] # **CramĆ©r-von Mises indices** # %% -computed_indices["CVM_i"] +SA.first_order_CramerVonMises_indices # **Plot the CVM indices** fig1, ax1 = plot_sensitivity_index( - computed_indices["CVM_i"][:, 0], + SA.first_order_CramerVonMises_indices[:, 0], plot_title="CramĆ©r-von Mises indices", color="C4", ) @@ -95,11 +95,11 @@ # 
:math:`S_6` = 0.03760626
 
 # %%
-computed_indices["sobol_i"]
+SA.first_order_sobol_indices
 
 # **Plot the first order Sobol indices**
 fig2, ax2 = plot_sensitivity_index(
-    computed_indices["sobol_i"][:, 0],
+    SA.first_order_sobol_indices[:, 0],
     plot_title="First order Sobol indices",
     color="C0",
 )
diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py b/docs/code/sensitivity/generalised_sobol/generalised_sobol_mechanical_oscillator_ODE.py
similarity index 91%
rename from docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py
rename to docs/code/sensitivity/generalised_sobol/generalised_sobol_mechanical_oscillator_ODE.py
index e9900578b..8ce86328e 100644
--- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_mechanical_oscillator_ODE.py
+++ b/docs/code/sensitivity/generalised_sobol/generalised_sobol_mechanical_oscillator_ODE.py
@@ -63,7 +63,7 @@
 # %% [markdown]
 SA = GeneralisedSobolSensitivity(runmodel_obj, dist_object)
 
-computed_indices = SA.run(n_samples=500)
+SA.run(n_samples=500)
 
 # %% [markdown]
 # **First order Generalised Sobol indices**
@@ -79,11 +79,11 @@
 # :math:`GS_{\ell}` = 0.0561
 
 # %%
-computed_indices["gen_sobol_i"]
+SA.generalized_first_order_indices
 
 # **Plot the first order sensitivity indices**
 fig1, ax1 = plot_sensitivity_index(
-    computed_indices["gen_sobol_i"][:, 0],
+    SA.generalized_first_order_indices[:, 0],
     plot_title="First order Generalised Sobol indices",
     variable_names=[r"$m$", "$c$", "$k$", "$\ell$"],
     color="C0",
@@ -93,12 +93,12 @@
 # **Total order Generalised Sobol indices**
 
 # %%
-computed_indices["gen_sobol_total_i"]
+SA.generalized_total_order_indices
 
 # **Plot the first and total order sensitivity indices**
 fig2, ax2 = plot_index_comparison(
-    computed_indices["gen_sobol_i"][:, 0],
-    computed_indices["gen_sobol_total_i"][:, 0],
+    SA.generalized_first_order_indices[:, 0],
+    SA.generalized_total_order_indices[:, 0],
     label_1="First order",
     label_2="Total order",
     plot_title="First and Total order Generalised Sobol indices",
diff --git a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py b/docs/code/sensitivity/generalised_sobol/generalised_sobol_multioutput.py
similarity index 76%
rename from docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py
rename to docs/code/sensitivity/generalised_sobol/generalised_sobol_multioutput.py
index f4347d84a..f17f62651 100644
--- a/docs/code/sensitivity/generalised_sobol/plot_generalised_sobol_multioutput.py
+++ b/docs/code/sensitivity/generalised_sobol/generalised_sobol_multioutput.py
@@ -54,7 +54,7 @@
 # %% [markdown]
 SA = GeneralisedSobolSensitivity(runmodel_obj, dist_object_1)
 
-computed_indices = SA.run(
+SA.run(
     n_samples=20_000, confidence_level=0.95, n_bootstrap_samples=5_00
 )
 
@@ -70,25 +70,25 @@
 # :math:`GS_2` = 0.1179
 
 # %%
-computed_indices["gen_sobol_i"]
+SA.generalized_first_order_indices
 
 # **Plot the first order sensitivity indices**
 fig1, ax1 = plot_sensitivity_index(
-    computed_indices["gen_sobol_i"][:, 0],
-    confidence_interval=computed_indices["confidence_interval_gen_sobol_i"],
+    SA.generalized_first_order_indices[:, 0],
+    confidence_interval=SA.first_order_confidence_interval,
     plot_title="First order Generalised Sobol indices",
     color="C0",
 )
 
 # %%
-computed_indices["gen_sobol_total_i"]
+SA.generalized_total_order_indices
 
 # **Plot the first and total order sensitivity indices**
 fig2, ax2 = plot_index_comparison(
-    computed_indices["gen_sobol_i"][:, 0],
-    computed_indices["gen_sobol_total_i"][:, 0],
-    confidence_interval_1=computed_indices["confidence_interval_gen_sobol_i"],
-    confidence_interval_2=computed_indices["confidence_interval_gen_sobol_total_i"],
+    SA.generalized_first_order_indices[:, 0],
+    SA.generalized_total_order_indices[:, 0],
+    confidence_interval_1=SA.first_order_confidence_interval,
+    confidence_interval_2=SA.total_order_confidence_interval,
     label_1="First order",
     label_2="Total order",
     plot_title="First and Total order Generalised Sobol indices",
@@ -102,7 +102,7 @@
 
 SA = GeneralisedSobolSensitivity(runmodel_obj, dist_object_2)
 
-computed_indices = SA.run(
+SA.run(
     n_samples=20_000, confidence_level=0.95, n_bootstrap_samples=5_00
 )
 
@@ -118,25 +118,25 @@
 # :math:`GS_2` = 0.3566
 
 # %%
-computed_indices["gen_sobol_i"]
+SA.generalized_first_order_indices
 
 # **Plot the first order sensitivity indices**
 fig3, ax3 = plot_sensitivity_index(
-    computed_indices["gen_sobol_i"][:, 0],
-    confidence_interval=computed_indices["confidence_interval_gen_sobol_i"],
+    SA.generalized_first_order_indices[:, 0],
+    confidence_interval=SA.first_order_confidence_interval,
     plot_title="First order Generalised Sobol indices",
     color="C0",
 )
 
 # %%
-computed_indices["gen_sobol_total_i"]
+SA.generalized_total_order_indices
 
 # **Plot the first and total order sensitivity indices**
 fig4, ax4 = plot_index_comparison(
-    computed_indices["gen_sobol_i"][:, 0],
-    computed_indices["gen_sobol_total_i"][:, 0],
-    confidence_interval_1=computed_indices["confidence_interval_gen_sobol_i"],
-    confidence_interval_2=computed_indices["confidence_interval_gen_sobol_total_i"],
+    SA.generalized_first_order_indices[:, 0],
+    SA.generalized_total_order_indices[:, 0],
+    confidence_interval_1=SA.first_order_confidence_interval,
+    confidence_interval_2=SA.total_order_confidence_interval,
     label_1="First order",
     label_2="Total order",
     plot_title="First and Total order Generalised Sobol indices",
diff --git a/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py b/docs/code/sensitivity/sobol/mechanical_oscillator_ODE.py
similarity index 78%
rename from docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py
rename to docs/code/sensitivity/sobol/mechanical_oscillator_ODE.py
index 0403e8a54..5dcbb0baf 100644
--- a/docs/code/sensitivity/sobol/plot_mechanical_oscillator_ODE.py
+++ b/docs/code/sensitivity/sobol/mechanical_oscillator_ODE.py
@@ -61,7 +61,7 @@
 # %% [markdown]
 SA = SobolSensitivity(runmodel_obj, dist_object)
 
-computed_indices = SA.run(n_samples=500)
+SA.run(n_samples=500)
 
 # %%
 # **Plot the Sobol indices**
@@ -74,12 +74,10 @@
 
 fig, ax = plt.subplots(1, 2, figsize=(16, 8))
 
-ax[0].plot(T, computed_indices["sobol_total_i"][0, :], "r", label=r"$m$")
-ax[0].plot(T, computed_indices["sobol_total_i"][1, :], "g", label=r"$c$")
-ax[0].plot(T, computed_indices["sobol_total_i"][2, :], label=r"$k$", color="royalblue")
-ax[0].plot(
-    T, computed_indices["sobol_total_i"][3, :], label=r"$\ell$", color="aquamarine"
-)
+ax[0].plot(T, SA.total_order_indices[0, :], "r", label=r"$m$")
+ax[0].plot(T, SA.total_order_indices[1, :], "g", label=r"$c$")
+ax[0].plot(T, SA.total_order_indices[2, :], label=r"$k$", color="royalblue")
+ax[0].plot(T, SA.total_order_indices[3, :], label=r"$\ell$", color="aquamarine")
 
 ax[0].set_title("Total order Sobol indices", fontsize=16)
 ax[0].set_xlabel("time (s)", fontsize=16)
@@ -88,10 +86,10 @@
 ax[0].set_ybound(-0.2, 1.2)
 ax[0].legend()
 
-ax[1].plot(T, computed_indices["sobol_i"][0, :], "r", label=r"$m$")
-ax[1].plot(T, computed_indices["sobol_i"][1, :], "g",
label=r"$c$") -ax[1].plot(T, computed_indices["sobol_i"][2, :], label=r"$k$", color="royalblue") -ax[1].plot(T, computed_indices["sobol_i"][3, :], label=r"$\ell$", color="aquamarine") +ax[1].plot(T, SA.first_order_indices[0, :], "r", label=r"$m$") +ax[1].plot(T, SA.first_order_indices[1, :], "g", label=r"$c$") +ax[1].plot(T, SA.first_order_indices[2, :], label=r"$k$", color="royalblue") +ax[1].plot(T, SA.first_order_indices[3, :], label=r"$\ell$", color="aquamarine") ax[1].set_title("First order Sobol indices", fontsize=16) ax[1].set_xlabel("time (s)", fontsize=16) diff --git a/docs/code/sensitivity/sobol/plot_sobol_additive.py b/docs/code/sensitivity/sobol/sobol_additive.py similarity index 92% rename from docs/code/sensitivity/sobol/plot_sobol_additive.py rename to docs/code/sensitivity/sobol/sobol_additive.py index dca212fe6..d3749cada 100644 --- a/docs/code/sensitivity/sobol/plot_sobol_additive.py +++ b/docs/code/sensitivity/sobol/sobol_additive.py @@ -53,7 +53,7 @@ # %% [markdown] SA = SobolSensitivity(runmodel_obj, dist_object) -computed_indices = SA.run(n_samples=50_000) +SA.run(n_samples=50_000) # %% [markdown] # **First order Sobol indices** @@ -65,13 +65,13 @@ # :math:`\mathrm{S}_2 = \frac{b^2 \cdot \mathbb{V}[X_2]}{a^2 \cdot \mathbb{V}[X_1] + b^2 \cdot \mathbb{V}[X_2]} = \frac{2^2 \cdot 1}{1^2 \cdot 1 + 2^2 \cdot 1} = 0.8` # %% -computed_indices["sobol_i"] +SA.first_order_indices # %% # **Plot the first and total order sensitivity indices** fig1, ax1 = plot_index_comparison( - computed_indices["sobol_i"][:, 0], - computed_indices["sobol_total_i"][:, 0], + SA.first_order_indices[:, 0], + SA.total_order_indices[:, 0], label_1="First order Sobol indices", label_2="Total order Sobol indices", plot_title="First and Total order Sobol indices", diff --git a/docs/code/sensitivity/sobol/plot_sobol_func.py b/docs/code/sensitivity/sobol/sobol_func.py similarity index 91% rename from docs/code/sensitivity/sobol/plot_sobol_func.py rename to docs/code/sensitivity/sobol/sobol_func.py index ba28d3e79..5dfdf1dcc 100644 --- a/docs/code/sensitivity/sobol/plot_sobol_func.py +++ b/docs/code/sensitivity/sobol/sobol_func.py @@ -62,7 +62,7 @@ SA = SobolSensitivity(runmodel_obj, dist_object) # Compute Sobol indices using the pick and freeze algorithm -computed_indices = SA.run(n_samples=50_000, estimate_second_order=True) +SA.run(n_samples=50_000, estimate_second_order=True) # %% [markdown] # **First order Sobol indices** @@ -82,12 +82,12 @@ # :math:`S_6` = 5.86781190e-05 # %% -computed_indices["sobol_i"] +SA.first_order_indices # %% # **Plot the first order sensitivity indices** fig1, ax1 = plot_sensitivity_index( - computed_indices["sobol_i"][:, 0], + SA.first_order_indices[:, 0], plot_title="First order Sobol indices", color="C0", ) @@ -111,13 +111,13 @@ # # %% -computed_indices["sobol_total_i"] +SA.total_order_indices # %% # **Plot the first and total order sensitivity indices** fig2, ax2 = plot_index_comparison( - computed_indices["sobol_i"][:, 0], - computed_indices["sobol_total_i"][:, 0], + SA.first_order_indices[:, 0], + SA.total_order_indices[:, 0], label_1="First order Sobol indices", label_2="Total order Sobol indices", plot_title="First and Total order Sobol indices", @@ -159,11 +159,11 @@ # :math:`S_{T_{56}}` = 2.0e-9 # %% -computed_indices["sobol_ij"] +SA.second_order_indices # %% # **Plot the second order sensitivity indices** fig3, ax3 = plot_second_order_indices( - computed_indices["sobol_ij"][:, 0], + SA.second_order_indices[:, 0], num_vars=num_vars, ) diff --git 
a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py b/docs/code/sensitivity/sobol/sobol_ishigami.py similarity index 84% rename from docs/code/sensitivity/sobol/plot_sobol_ishigami.py rename to docs/code/sensitivity/sobol/sobol_ishigami.py index 664bbc0b9..5875b8afd 100644 --- a/docs/code/sensitivity/sobol/plot_sobol_ishigami.py +++ b/docs/code/sensitivity/sobol/sobol_ishigami.py @@ -74,7 +74,7 @@ # %% SA = SobolSensitivity(runmodel_obj, dist_object) -computed_indices = SA.run(n_samples=100_000, n_bootstrap_samples=100) +SA.run(n_samples=100_000, n_bootstrap_samples=100) # %% [markdown] # **First order Sobol indices** @@ -88,7 +88,7 @@ # :math:`S_3` = 0.0 # %% -computed_indices["sobol_i"] +SA.first_order_indices # %% [markdown] # **Total order Sobol indices** @@ -102,25 +102,25 @@ # :math:`S_{T_3}` = 0.24368366 # %% -computed_indices["sobol_total_i"] +SA.total_order_indices # %% [markdown] # **Confidence intervals for first order Sobol indices** # %% -computed_indices["confidence_interval_sobol_i"] +SA.first_order_confidence_interval # %% [markdown] # **Confidence intervals for total order Sobol indices** # %% -computed_indices["confidence_interval_sobol_total_i"] +SA.total_order_confidence_interval # %% # **Plot the first order sensitivity indices** fig1, ax1 = plot_sensitivity_index( - computed_indices["sobol_i"][:, 0], - confidence_interval=computed_indices["confidence_interval_sobol_i"], + SA.first_order_indices[:, 0], + confidence_interval=SA.first_order_confidence_interval, plot_title="First order Sobol indices", variable_names=["$X_1$", "$X_2$", "$X_3$"], color="C0", @@ -129,10 +129,10 @@ # %% # **Plot the first and total order sensitivity indices** fig2, ax2 = plot_index_comparison( - computed_indices["sobol_i"][:, 0], - computed_indices["sobol_total_i"][:, 0], - confidence_interval_1=computed_indices["confidence_interval_sobol_i"], - confidence_interval_2=computed_indices["confidence_interval_sobol_total_i"], + SA.first_order_indices[:, 0], + SA.total_order_indices[:, 0], + confidence_interval_1=SA.first_order_confidence_interval, + confidence_interval_2=SA.total_order_confidence_interval, label_1="First order Sobol indices", label_2="Total order Sobol indices", plot_title="First and Total order Sobol indices", diff --git a/src/UQpy/sensitivity/SobolSensitivity.py b/src/UQpy/sensitivity/SobolSensitivity.py index 967f8f968..294bc3c57 100644 --- a/src/UQpy/sensitivity/SobolSensitivity.py +++ b/src/UQpy/sensitivity/SobolSensitivity.py @@ -249,10 +249,6 @@ def run( self.logger.info("UQpy: All model evaluations computed successfully.") - ######################### STORAGE ######################## - - # Create dictionary to store the sensitivity indices - computed_indices = {} ################## COMPUTE SOBOL INDICES ################## @@ -267,8 +263,6 @@ def run( self.logger.info("UQpy: First order Sobol indices computed successfully.") - computed_indices["sobol_i"] = self.first_order_indices - # Total order Sobol indices self.total_order_indices = compute_total_order( A_model_evals, @@ -280,8 +274,6 @@ def run( self.logger.info("UQpy: Total order Sobol indices computed successfully.") - computed_indices["sobol_total_i"] = self.total_order_indices - if estimate_second_order: # Second order Sobol indices @@ -290,13 +282,12 @@ def run( B_model_evals, C_i_model_evals, D_i_model_evals, - computed_indices["sobol_i"], + self.first_order_indices, scheme=second_order_scheme, ) self.logger.info("UQpy: Second order Sobol indices computed successfully.") - computed_indices["sobol_ij"] = 
self.second_order_indices ################## CONFIDENCE INTERVALS #################### @@ -315,7 +306,7 @@ def run( self.first_order_confidence_interval = self.bootstrapping( compute_first_order, estimator_inputs, - computed_indices["sobol_i"], + self.first_order_indices, n_bootstrap_samples, confidence_level, scheme=first_order_scheme, @@ -325,15 +316,11 @@ def run( "UQpy: Confidence intervals for First order Sobol indices computed successfully." ) - computed_indices[ - "confidence_interval_sobol_i" - ] = self.first_order_confidence_interval - # Total order Sobol indices self.total_order_confidence_interval = self.bootstrapping( compute_total_order, estimator_inputs, - computed_indices["sobol_total_i"], + self.total_order_indices, n_bootstrap_samples, confidence_level, scheme=total_order_scheme, @@ -343,19 +330,16 @@ def run( "UQpy: Confidence intervals for Total order Sobol indices computed successfully." ) - computed_indices[ - "confidence_interval_sobol_total_i" - ] = self.total_order_confidence_interval # Second order Sobol indices if estimate_second_order: self.second_order_confidence_interval = self.bootstrapping( compute_second_order, estimator_inputs, - computed_indices["sobol_ij"], + self.second_order_indices, n_bootstrap_samples, confidence_level, - first_order_sobol=computed_indices["sobol_i"], + first_order_sobol=self.first_order_indices, scheme=second_order_scheme, ) @@ -363,12 +347,6 @@ def run( "UQpy: Confidence intervals for Second order Sobol indices computed successfully." ) - computed_indices[ - "confidence_interval_sobol_ij" - ] = self.second_order_confidence_interval - - return computed_indices - ###################### Pick and Freeze Methods ##################### From bc15e7f02aff2579f576659666c6c75c46f28f4b Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Fri, 26 Aug 2022 10:59:18 +0300 Subject: [PATCH 69/88] Documentation fixes due to class and attribute renaming --- README.rst | 3 +-- docs/source/index.rst | 2 +- docs/source/sensitivity/chatterjee.rst | 16 ++++++------ docs/source/sensitivity/cramer_von_mises.rst | 18 ++++++------- docs/source/sensitivity/generalised_sobol.rst | 14 +++++----- docs/source/sensitivity/index.rst | 2 +- docs/source/sensitivity/sobol.rst | 26 +++++++++---------- src/UQpy/sensitivity/ChatterjeeSensitivity.py | 18 +++++-------- .../sensitivity/CramerVonMisesSensitivity.py | 11 ++------ .../GeneralisedSobolSensitivity.py | 25 ++++++++---------- src/UQpy/sensitivity/SobolSensitivity.py | 12 --------- 11 files changed, 58 insertions(+), 89 deletions(-) diff --git a/README.rst b/README.rst index 6ff98434d..4bd0daeb4 100644 --- a/README.rst +++ b/README.rst @@ -1,7 +1,6 @@ |AzureDevops| |PyPIdownloads| |PyPI| |CondaSURG| |CondaPlatforms| |GithubRelease| |Binder| |Docs| |bear-ified| .. |Docs| image:: https://img.shields.io/readthedocs/uqpy?style=plastic :alt: Read the Docs -.. |CondaSURG| image:: https://img.shields.io/conda/vn/SURG_JHU/uqpy?style=plastic :alt: Conda (channel only) .. |CondaPlatforms| image:: https://img.shields.io/conda/pn/SURG_JHU/uqpy?style=plastic :alt: Conda .. |GithubRelease| image:: https://img.shields.io/github/v/release/SURGroup/UQpy?style=plastic :alt: GitHub release (latest by date) .. 
|AzureDevops| image:: https://img.shields.io/azure-devops/build/UQpy/5ce1851f-e51f-4e18-9eca-91c3ad9f9900/1?style=plastic :alt: Azure DevOps builds @@ -35,7 +34,7 @@ Uncertainty Quantification with python (UQpy) + + + | | Promit Chakroborty, LukĆ”Å” NovĆ”k, Andrew Solanto | +-----------------------+------------------------------------------------------------------+ -| **Contributors:** | Michael Gardner | +| **Contributors:** | Michael Gardner, Prateek Bhustali, Julius Schultz, Ulrich Rƶmer | +-----------------------+------------------------------------------------------------------+ Contact diff --git a/docs/source/index.rst b/docs/source/index.rst index 41c0d214a..d641b0695 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -20,7 +20,7 @@ as a set of modules centered around core capabilities in Uncertainty Quantificat + + + | | Promit Chakroborty, LukĆ”Å” NovĆ”k, Andrew Solanto | +-----------------------+------------------------------------------------------------------+ -| **Contributors:** | Michael Gardner | +| **Contributors:** | Michael Gardner, Prateek Bhustali, Julius Schultz, Ulrich Rƶmer | +-----------------------+------------------------------------------------------------------+ Introduction diff --git a/docs/source/sensitivity/chatterjee.rst b/docs/source/sensitivity/chatterjee.rst index 42ee716e8..bc2ccb6f3 100644 --- a/docs/source/sensitivity/chatterjee.rst +++ b/docs/source/sensitivity/chatterjee.rst @@ -16,22 +16,22 @@ Furthermore, the Sobol indices can be efficiently estimated by leveraging the sa Chatterjee Class ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The :class:`Chatterjee` class is imported using the following command: +The :class:`.ChatterjeeSensitivity` class is imported using the following command: ->>> from UQpy.sensitivity.chatterjee import Chatterjee +>>> from UQpy.sensitivity.ChatterjeeSensitivity import ChatterjeeSensitivity Methods """"""" -.. autoclass:: UQpy.sensitivity.Chatterjee +.. autoclass:: UQpy.sensitivity.ChatterjeeSensitivity :members: run, compute_chatterjee_indices, rank_analog_to_pickfreeze, compute_Sobol_indices Attributes """""""""" -.. autoattribute:: UQpy.sensitivity.Chatterjee.chatterjee_i -.. autoattribute:: UQpy.sensitivity.Chatterjee.sobol_i -.. autoattribute:: UQpy.sensitivity.Chatterjee.confidence_interval_chatterjee_i -.. autoattribute:: UQpy.sensitivity.Chatterjee.num_vars -.. autoattribute:: UQpy.sensitivity.Chatterjee.n_samples +.. autoattribute:: UQpy.sensitivity.ChatterjeeSensitivity.first_order_chatterjee_indices +.. autoattribute:: UQpy.sensitivity.ChatterjeeSensitivity.first_order_sobol_indices +.. autoattribute:: UQpy.sensitivity.ChatterjeeSensitivity.confidence_interval_chatterjee +.. autoattribute:: UQpy.sensitivity.ChatterjeeSensitivity.n_variables +.. 
autoattribute:: UQpy.sensitivity.ChatterjeeSensitivity.n_samples Examples """""""""" diff --git a/docs/source/sensitivity/cramer_von_mises.rst b/docs/source/sensitivity/cramer_von_mises.rst index 22477cbc2..3736ffc6d 100644 --- a/docs/source/sensitivity/cramer_von_mises.rst +++ b/docs/source/sensitivity/cramer_von_mises.rst @@ -34,23 +34,23 @@ The above first and total order indices are estimated using the Pick-and-Freeze CramĆ©r-von Mises Class ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The :class:`CramĆ©r-von Mises` class is imported using the following command: +The :class:`.CramerVonMisesSensitivity` class is imported using the following command: ->>> from UQpy.sensitivity.cramer_von_mises import CramerVonMises +>>> from UQpy.sensitivity.CramerVonMisesSensitivity import CramerVonMisesSensitivity Methods """"""" -.. autoclass:: UQpy.sensitivity.CramervonMises +.. autoclass:: UQpy.sensitivity.CramerVonMisesSensitivity :members: run Attributes """""""""" -.. autoattribute:: UQpy.sensitivity.CramervonMises.CVM_i -.. autoattribute:: UQpy.sensitivity.CramervonMises.confidence_interval_CVM_i -.. autoattribute:: UQpy.sensitivity.CramervonMises.sobol_i -.. autoattribute:: UQpy.sensitivity.CramervonMises.sobol_total_i -.. autoattribute:: UQpy.sensitivity.CramervonMises.n_samples -.. autoattribute:: UQpy.sensitivity.CramervonMises.num_vars +.. autoattribute:: UQpy.sensitivity.CramerVonMisesSensitivity.first_order_CramerVonMises_indices +.. autoattribute:: UQpy.sensitivity.CramerVonMisesSensitivity.confidence_interval_CramerVonMises +.. autoattribute:: UQpy.sensitivity.CramerVonMisesSensitivity.first_order_sobol_indices +.. autoattribute:: UQpy.sensitivity.CramerVonMisesSensitivity.total_order_sobol_indices +.. autoattribute:: UQpy.sensitivity.CramerVonMisesSensitivity.n_samples +.. autoattribute:: UQpy.sensitivity.CramerVonMisesSensitivity.n_variables Examples diff --git a/docs/source/sensitivity/generalised_sobol.rst b/docs/source/sensitivity/generalised_sobol.rst index 1fcb5fd5a..6ecc7c9ad 100644 --- a/docs/source/sensitivity/generalised_sobol.rst +++ b/docs/source/sensitivity/generalised_sobol.rst @@ -50,22 +50,22 @@ and Generalised Sobol Class ^^^^^^^^^^^^^^^^^^^^^^^^^^ -The :class:`Generalised Sobol` class is imported using the following command: +The :class:`GeneralisedSobolSensitivity` class is imported using the following command: ->>> from UQpy.sensitivity.generalised_sobol import GeneralisedSobol +>>> from UQpy.sensitivity.GeneralisedSobolSensitivity import GeneralisedSobolSensitivity Methods """"""" -.. autoclass:: UQpy.sensitivity.GeneralisedSobol +.. autoclass:: UQpy.sensitivity.GeneralisedSobolSensitivity :members: run Attributes """""""""" -.. autoattribute:: UQpy.sensitivity.GeneralisedSobol.gen_sobol_i -.. autoattribute:: UQpy.sensitivity.GeneralisedSobol.gen_sobol_total_i -.. autoattribute:: UQpy.sensitivity.GeneralisedSobol.n_samples -.. autoattribute:: UQpy.sensitivity.GeneralisedSobol.num_vars +.. autoattribute:: UQpy.sensitivity.GeneralisedSobolSensitivity.generalized_first_order_indices +.. autoattribute:: UQpy.sensitivity.GeneralisedSobolSensitivity.generalized_total_order_indices +.. autoattribute:: UQpy.sensitivity.GeneralisedSobolSensitivity.n_samples +.. 
autoattribute:: UQpy.sensitivity.GeneralisedSobolSensitivity.n_variables
 
 Examples
 """"""""""
diff --git a/docs/source/sensitivity/index.rst b/docs/source/sensitivity/index.rst
index 161cfd3b2..0cd153559 100644
--- a/docs/source/sensitivity/index.rst
+++ b/docs/source/sensitivity/index.rst
@@ -20,7 +20,7 @@ Sensitivity analysis comprises techniques focused on determining how the variati
     :hidden:
     :caption: Sensitivity
 
-    Chatterjee
+    Chatterjee Sensitivity
     Cramér-von Mises Sensitivity
     Generalised Sobol Sensitivity
     Morris Sensitivity
diff --git a/docs/source/sensitivity/sobol.rst b/docs/source/sensitivity/sobol.rst
index 56c704a96..f2ecb537f 100644
--- a/docs/source/sensitivity/sobol.rst
+++ b/docs/source/sensitivity/sobol.rst
@@ -61,30 +61,28 @@ Compared to "Sobol1993", the "Janon2014" estimator makes more efficient use of m
 Sobol Class
 ^^^^^^^^^^^^^^^^^^
 
-The :class:`Sobol` class is imported using the following command:
+The :class:`.SobolSensitivity` class is imported using the following command:
 
->>> from UQpy.sensitivity.SobolSensitivity import Sobol
+>>> from UQpy.sensitivity.SobolSensitivity import SobolSensitivity
 
->>> from UQpy.sensitivity.SobolSensitivity import Sobol
-
->>> from UQpy.sensitivity.Sobol import Sobol
 
 Methods
 """""""
-.. autoclass:: UQpy.sensitivity.Sobol
+.. autoclass:: UQpy.sensitivity.SobolSensitivity
     :members: run
 
 Attributes
 """"""""""
-.. autoattribute:: UQpy.sensitivity.Sobol.sobol_i
-.. autoattribute:: UQpy.sensitivity.Sobol.sobol_total_i
-.. autoattribute:: UQpy.sensitivity.Sobol.confidence_interval_sobol_i
-.. autoattribute:: UQpy.sensitivity.Sobol.confidence_interval_sobol_total_i
-.. autoattribute:: UQpy.sensitivity.Sobol.confidence_interval_sobol_ij
-.. autoattribute:: UQpy.sensitivity.Sobol.n_samples
-.. autoattribute:: UQpy.sensitivity.Sobol.num_vars
-.. autoattribute:: UQpy.sensitivity.Sobol.multioutput
+.. autoattribute:: UQpy.sensitivity.SobolSensitivity.first_order_indices
+.. autoattribute:: UQpy.sensitivity.SobolSensitivity.second_order_indices
+.. autoattribute:: UQpy.sensitivity.SobolSensitivity.total_order_indices
+.. autoattribute:: UQpy.sensitivity.SobolSensitivity.first_order_confidence_interval
+.. autoattribute:: UQpy.sensitivity.SobolSensitivity.second_order_confidence_interval
+.. autoattribute:: UQpy.sensitivity.SobolSensitivity.total_order_confidence_interval
+.. autoattribute:: UQpy.sensitivity.SobolSensitivity.n_samples
+.. autoattribute:: UQpy.sensitivity.SobolSensitivity.n_variables
+.. autoattribute:: UQpy.sensitivity.SobolSensitivity.is_multi_output
 
 
 Examples
diff --git a/src/UQpy/sensitivity/ChatterjeeSensitivity.py b/src/UQpy/sensitivity/ChatterjeeSensitivity.py
index 00ef7a940..99ab411b3 100644
--- a/src/UQpy/sensitivity/ChatterjeeSensitivity.py
+++ b/src/UQpy/sensitivity/ChatterjeeSensitivity.py
@@ -102,12 +102,6 @@ def run(
 
     :param confidence_level: Confidence level used to compute the confidence \
         intervals of the Cramér-von Mises indices.
-
-    :return: A :class:`dict` with the following keys: \
-        :code:`'chatterjee_i'` of shape :code:`(num_vars, 1)`, \
-        :code:`'confidence_interval_chatterjee_i'` of shape :code:`(num_vars, 2)`, \
-        :code:`'sobol_i'` of shape :code:`(num_vars, 1)`.
-
     """
 
     # Check nsamples
@@ -188,13 +182,13 @@ def compute_chatterjee_indices(
     between the input random vectors :math:`X=\left[ X_{1}, X_{2},…,X_{d} \right]`
     and output random vector Y.
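As an aside for readers following this hunk: the quantity computed by compute_chatterjee_indices is Chatterjee's rank correlation coefficient, which admits a very short reference implementation. The following is an illustrative NumPy sketch for a single input column with no ties in Y (the function name chatterjee_xi is ours, not UQpy's), not the vectorized code the class actually uses:

import numpy as np

def chatterjee_xi(x, y):
    # Sort the (x, y) pairs by x, then rank the y values in that ordering.
    n = len(x)
    y_by_x = y[np.argsort(x)]
    ranks = np.argsort(np.argsort(y_by_x)) + 1  # r_i = #{j : y_(j) <= y_(i)}, valid without ties
    # Chatterjee (2021): xi_n = 1 - 3 * sum_i |r_{i+1} - r_i| / (n^2 - 1)
    return 1.0 - 3.0 * np.abs(np.diff(ranks)).sum() / (n ** 2 - 1)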
- :param X: Input random vectors, :class:`numpy.ndarray` of shape :code:`(n_samples, num_vars)` + :param X: Input random vectors, :class:`numpy.ndarray` of shape :code:`(n_samples, n_variables)` :param Y: Output random vector, :class:`numpy.ndarray` of shape :code:`(n_samples, 1)` :param seed: Seed for the random number generator. - :return: Chatterjee sensitivity indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)` + :return: Chatterjee sensitivity indices, :class:`numpy.ndarray` of shape :code:`(n_variables, 1)` """ @@ -388,9 +382,9 @@ def compute_Sobol_indices( :param A_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_samples, 1)` - :param C_i_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_samples, num_vars)` + :param C_i_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_samples, n_variables)` - :return: First order Sobol indices, :class:`numpy.ndarray` of shape :code:`(num_vars, 1)` + :return: First order Sobol indices, :class:`numpy.ndarray` of shape :code:`(n_variables, 1)` """ @@ -422,7 +416,7 @@ def compute_rank_analog_of_f_C_i( **Inputs:** * **A_samples** (`ndarray`): - Shape: `(n_samples, num_vars)`. + Shape: `(n_samples, n_variables)`. * **A_model_evals** (`ndarray`): Shape: `(n_samples, 1)`. @@ -430,7 +424,7 @@ def compute_rank_analog_of_f_C_i( **Outputs:** * **A_i_model_evals** (`ndarray`): - Shape: `(n_samples, num_vars)`. + Shape: `(n_samples, n_variables)`. """ diff --git a/src/UQpy/sensitivity/CramerVonMisesSensitivity.py b/src/UQpy/sensitivity/CramerVonMisesSensitivity.py index 459b2f9e9..7acc8f536 100644 --- a/src/UQpy/sensitivity/CramerVonMisesSensitivity.py +++ b/src/UQpy/sensitivity/CramerVonMisesSensitivity.py @@ -113,13 +113,6 @@ def run( :param disable_CVM_indices: If :code:`True`, the CramĆ©r-von Mises indices \ are not computed. - - :return: A :class:`dict` with the following keys: \ - :code:`CVM_i` of shape :code:`(num_vars, 1)`, \ - :code:`confidence_interval_CVM_i` of shape :code:`(num_vars, 2)`, \ - :code:`sobol_i` of shape :code:`(num_vars, 1)`, \ - :code:`sobol_total_i` of shape :code:`(num_vars, 1)`. - """ # Check nsamples @@ -269,12 +262,12 @@ def pick_and_freeze_estimator( Shape: `(n_samples, 1)` * **C_i_model_evals** (`np.array`): - Shape: `(n_samples, num_vars)` + Shape: `(n_samples, n_variables)` **Outputs** * **First_order_CVM** (`np.array`): - Shape: `(num_vars)` + Shape: `(n_variables)` """ diff --git a/src/UQpy/sensitivity/GeneralisedSobolSensitivity.py b/src/UQpy/sensitivity/GeneralisedSobolSensitivity.py index 338e8cf59..544544073 100644 --- a/src/UQpy/sensitivity/GeneralisedSobolSensitivity.py +++ b/src/UQpy/sensitivity/GeneralisedSobolSensitivity.py @@ -69,10 +69,10 @@ def __init__( self.logger = logging.getLogger(__name__) self.generalized_first_order_indices = None - "Generalised first order Sobol indices, :class:`ndarray` of shape (n_variables, 1)" + "Generalised first order Sobol indices, :any:`numpy.ndarray` of shape (n_variables, 1)" self.generalized_total_order_indices = None - "Generalised total order Sobol indices, :class:`ndarray` of shape (n_variables, 1)" + "Generalised total order Sobol indices, :any:`numpy.ndarray` of shape (n_variables, 1)" self.n_samples = None "Number of samples used to compute the sensitivity indices, :class:`int`" @@ -100,13 +100,6 @@ def run( :param confidence_level: Confidence level used to compute the confidence \ intervals. Default is 0.95. 
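Because run no longer returns a dictionary, downstream code reads the attributes populated on the object instead. A minimal usage sketch for the generalised indices, assuming runmodel_obj and dist_object are constructed as in the examples earlier in this series:

SA = GeneralisedSobolSensitivity(runmodel_obj, dist_object)
SA.run(n_samples=20_000, n_bootstrap_samples=500, confidence_level=0.95)

gen_first = SA.generalized_first_order_indices  # shape (n_variables, 1)
gen_total = SA.generalized_total_order_indices  # shape (n_variables, 1)
ci_first = SA.first_order_confidence_interval   # shape (n_variables, 2)
ci_total = SA.total_order_confidence_interval   # shape (n_variables, 2)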
- - :return: A :class:`dict` with the following keys: \ - :code:`gen_sobol_i` of shape :code:`(num_vars, 1)`, \ - :code:`gen_sobol_total_i` of shape :code:`(num_vars, 1)`, \ - :code:`confidence_interval_gen_sobol_i` of shape :code:`(num_vars, 2)`, \ - :code:`confidence_interval_gen_sobol_total_i` of shape :code:`(num_vars, 2)`. - """ # Check n_samples data type @@ -153,7 +146,7 @@ def run( self.n_outputs = A_model_evals.shape[1] - # shape: (n_outputs, n_samples, num_vars) + # shape: (n_outputs, n_samples, n_variables) C_i_model_evals = np.zeros((self.n_outputs, self.n_samples, self.n_variables)) for i, C_i in enumerate(C_i_generator): @@ -235,9 +228,11 @@ def compute_first_order_generalised_sobol_indices( :param A_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_samples, n_outputs)`. :param B_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_samples, n_outputs)`. - :param C_i_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_outputs, n_samples, num_vars)`. + :param C_i_model_evals: Model evaluations, :class:`numpy.ndarray` of shape + :code:`(n_outputs, n_samples, n_variables)`. - :return: First order generalised Sobol indices, :class:`numpy.ndarray` of shape :code:`(n_outputs, num_vars)`. + :return: First order generalised Sobol indices, :class:`numpy.ndarray` of shape + :code:`(n_outputs, n_variables)`. """ @@ -304,9 +299,11 @@ def compute_total_order_generalised_sobol_indices( :param A_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_samples, n_outputs)`. :param B_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_samples, n_outputs)`. - :param C_i_model_evals: Model evaluations, :class:`numpy.ndarray` of shape :code:`(n_outputs, n_samples, num_vars)`. + :param C_i_model_evals: Model evaluations, :class:`numpy.ndarray` of shape + :code:`(n_outputs, n_samples, n_variables)`. - :return: Total order generalised Sobol indices, :class:`numpy.ndarray` of shape :code:`(n_outputs, num_vars)`. + :return: Total order generalised Sobol indices, :class:`numpy.ndarray` of shape + :code:`(n_outputs, n_variables)`. """ diff --git a/src/UQpy/sensitivity/SobolSensitivity.py b/src/UQpy/sensitivity/SobolSensitivity.py index 294bc3c57..8aa24e929 100644 --- a/src/UQpy/sensitivity/SobolSensitivity.py +++ b/src/UQpy/sensitivity/SobolSensitivity.py @@ -162,18 +162,6 @@ def run( :param second_order_scheme: Scheme used to compute the second order \ Sobol indices. Default is "Saltelli2002". 
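The :return: block removed just below maps one-to-one onto public attributes that run now sets; a migration sketch for code written against the old dictionary interface (SA is a SobolSensitivity object):

SA.run(n_samples=n_samples)  # no longer returns a dict
# computed_indices["sobol_i"]                           ->  SA.first_order_indices
# computed_indices["sobol_total_i"]                     ->  SA.total_order_indices
# computed_indices["sobol_ij"]                          ->  SA.second_order_indices
# computed_indices["confidence_interval_sobol_i"]       ->  SA.first_order_confidence_interval
# computed_indices["confidence_interval_sobol_total_i"] ->  SA.total_order_confidence_interval
# computed_indices["confidence_interval_sobol_ij"]      ->  SA.second_order_confidence_interval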
- - :return: A :class:`dict` with the following keys: \ - :code:`sobol_i` of shape :code:`(num_vars, 1)`, \ - :code:`sobol_total_i` of shape :code:`(num_vars, 1)`, \ - :code:`sobol_ij` of shape :code:`(num_second_order_terms, 1)`, \ - :code:`confidence_interval_sobol_i` of shape :code:`(num_vars, 2)`, \ - if multioutput: Shape: `(n_outputs, num_vars, 2)`, \ - :code:`confidence_interval_sobol_total_i` of shape :code:`(num_vars, 2)`, \ - if multioutput: Shape: `(n_outputs, num_vars, 2)`, \ - :code:`confidence_interval_sobol_ij` of shape :code:`(num_second_order_terms, 2)` - if multioutput: Shape: `(n_outputs, num_second_order_terms, 2)`, \ - """ # Check n_samples data type self.n_samples = n_samples From ec2f910a4418efa32b4911b599d13d867d10279d Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Fri, 26 Aug 2022 16:23:05 +0300 Subject: [PATCH 70/88] Fixes failing Sobol Sensitivity tests --- tests/unit_tests/sensitivity/test_sobol.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/unit_tests/sensitivity/test_sobol.py b/tests/unit_tests/sensitivity/test_sobol.py index d9164e219..1b86c4e95 100644 --- a/tests/unit_tests/sensitivity/test_sobol.py +++ b/tests/unit_tests/sensitivity/test_sobol.py @@ -146,9 +146,9 @@ def saltelli_ishigami_Sobol_indices(sobol_object): np.random.seed(12345) #! set seed for reproducibility - computed_indices = SA.run(n_samples=1_000_000) + SA.run(n_samples=1_000_000) - return computed_indices["sobol_i"], computed_indices["sobol_total_i"] + return SA.first_order_indices, SA.total_order_indices @pytest.fixture() @@ -335,9 +335,9 @@ def saltelli_sobol_g_function(sobol_object_g_func): # Compute Sobol indices using the pick and freeze algorithm # Save only second order indices - computed_indices = SA.run(n_samples=100_000, estimate_second_order=True) + SA.run(n_samples=100_000, estimate_second_order=True) - return computed_indices["sobol_ij"] + return SA.second_order_indices # Unit tests From b82e53ee96030c234bf8fb599e6ea94435d41070 Mon Sep 17 00:00:00 2001 From: Michael Shields Date: Thu, 20 Oct 2022 14:14:49 -0400 Subject: [PATCH 71/88] Very minor updates to the documentation for Chatterjee Sensitivity documentation --- docs/source/sensitivity/chatterjee.rst | 2 +- src/UQpy/sensitivity/ChatterjeeSensitivity.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/source/sensitivity/chatterjee.rst b/docs/source/sensitivity/chatterjee.rst index bc2ccb6f3..48d68a339 100644 --- a/docs/source/sensitivity/chatterjee.rst +++ b/docs/source/sensitivity/chatterjee.rst @@ -23,7 +23,7 @@ The :class:`.ChatterjeeSensitivity` class is imported using the following comman Methods """"""" .. autoclass:: UQpy.sensitivity.ChatterjeeSensitivity - :members: run, compute_chatterjee_indices, rank_analog_to_pickfreeze, compute_Sobol_indices + :members: run, compute_chatterjee_indices, rank_analog_to_pickfreeze, rank_analog_to_pickfreeze_vec, compute_Sobol_indices Attributes """""""""" diff --git a/src/UQpy/sensitivity/ChatterjeeSensitivity.py b/src/UQpy/sensitivity/ChatterjeeSensitivity.py index 99ab411b3..9473a98ee 100644 --- a/src/UQpy/sensitivity/ChatterjeeSensitivity.py +++ b/src/UQpy/sensitivity/ChatterjeeSensitivity.py @@ -89,7 +89,9 @@ def run( confidence_level: PositiveFloat = 0.95, ): """ - Compute the sensitivity indices using the Chatterjee method. + Compute the sensitivity indices using the Chatterjee method. Employing the :code:`run` method will initialize + :code:`n_samples` simulations using :class:`.RunModel`. 
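The added docstring continues below by pointing users to the static methods for pre-computed data. A hypothetical sketch of that workflow (the toy model and sample size are illustrative only; array shapes follow the docstrings later in this patch):

import numpy as np
from UQpy.sensitivity.ChatterjeeSensitivity import ChatterjeeSensitivity

X = np.random.uniform(size=(10_000, 3))            # pre-computed inputs, shape (n_samples, n_variables)
Y = (X[:, 0] + 0.5 * X[:, 1] ** 2).reshape(-1, 1)  # pre-computed outputs, shape (n_samples, 1)
xi = ChatterjeeSensitivity.compute_chatterjee_indices(X, Y)  # shape (n_variables, 1)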
To compute sensitivity indices using pre-computed inputs + and outputs, use the static methods described below. :param n_samples: Number of samples used to compute the Chatterjee indices. \ Default is 1,000. From 066e5a35ebef50dfcb4ca86bc9192bda2c7e4e07 Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Thu, 20 Oct 2022 14:26:30 -0400 Subject: [PATCH 72/88] Update README.rst --- README.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.rst b/README.rst index 4bd0daeb4..919d87590 100644 --- a/README.rst +++ b/README.rst @@ -1,4 +1,4 @@ -|AzureDevops| |PyPIdownloads| |PyPI| |CondaSURG| |CondaPlatforms| |GithubRelease| |Binder| |Docs| |bear-ified| +|AzureDevops| |PyPIdownloads| |PyPI| |CondaPlatforms| |GithubRelease| |Binder| |Docs| |bear-ified| .. |Docs| image:: https://img.shields.io/readthedocs/uqpy?style=plastic :alt: Read the Docs .. |CondaPlatforms| image:: https://img.shields.io/conda/pn/SURG_JHU/uqpy?style=plastic :alt: Conda @@ -90,7 +90,6 @@ Using Conda * :: conda install -c conda-forge uqpy - conda install -c surg_jhu uqpy (latest version) Clone your fork of the UQpy repo from your GitHub account to your local disk (to get the latest version): From a79da949517463543e37d532a3e84fb9fe6cd52b Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Thu, 20 Oct 2022 14:43:37 -0400 Subject: [PATCH 73/88] Delete src/UQpy/surrogates/kriging directory --- src/UQpy/surrogates/kriging/Kriging.py | 354 ------------------ src/UQpy/surrogates/kriging/__init__.py | 4 - .../correlation_models/CubicCorrelation.py | 28 -- .../ExponentialCorrelation.py | 20 - .../correlation_models/GaussianCorrelation.py | 21 -- .../correlation_models/LinearCorrelation.py | 35 -- .../SphericalCorrelation.py | 28 -- .../correlation_models/SplineCorrelation.py | 58 --- .../kriging/correlation_models/__init__.py | 7 - .../baseclass/Correlation.py | 47 --- .../correlation_models/baseclass/__init__.py | 1 - .../regression_models/ConstantRegression.py | 10 - .../regression_models/LinearRegression.py | 12 - .../regression_models/QuadraticRegression.py | 37 -- .../kriging/regression_models/__init__.py | 4 - .../regression_models/baseclass/Regression.py | 14 - .../regression_models/baseclass/__init__.py | 1 - 17 files changed, 681 deletions(-) delete mode 100755 src/UQpy/surrogates/kriging/Kriging.py delete mode 100644 src/UQpy/surrogates/kriging/__init__.py delete mode 100644 src/UQpy/surrogates/kriging/correlation_models/CubicCorrelation.py delete mode 100644 src/UQpy/surrogates/kriging/correlation_models/ExponentialCorrelation.py delete mode 100644 src/UQpy/surrogates/kriging/correlation_models/GaussianCorrelation.py delete mode 100644 src/UQpy/surrogates/kriging/correlation_models/LinearCorrelation.py delete mode 100644 src/UQpy/surrogates/kriging/correlation_models/SphericalCorrelation.py delete mode 100644 src/UQpy/surrogates/kriging/correlation_models/SplineCorrelation.py delete mode 100644 src/UQpy/surrogates/kriging/correlation_models/__init__.py delete mode 100644 src/UQpy/surrogates/kriging/correlation_models/baseclass/Correlation.py delete mode 100644 src/UQpy/surrogates/kriging/correlation_models/baseclass/__init__.py delete mode 100644 src/UQpy/surrogates/kriging/regression_models/ConstantRegression.py delete mode 100644 src/UQpy/surrogates/kriging/regression_models/LinearRegression.py delete mode 100644 src/UQpy/surrogates/kriging/regression_models/QuadraticRegression.py delete mode 100644 src/UQpy/surrogates/kriging/regression_models/__init__.py delete mode 100644 
src/UQpy/surrogates/kriging/regression_models/baseclass/Regression.py delete mode 100644 src/UQpy/surrogates/kriging/regression_models/baseclass/__init__.py diff --git a/src/UQpy/surrogates/kriging/Kriging.py b/src/UQpy/surrogates/kriging/Kriging.py deleted file mode 100755 index 19cbfaf1d..000000000 --- a/src/UQpy/surrogates/kriging/Kriging.py +++ /dev/null @@ -1,354 +0,0 @@ -import logging -from typing import Callable - -import numpy as np -from scipy.linalg import cholesky -import scipy.stats as stats -from beartype import beartype - -from UQpy.utilities import MinimizeOptimizer -from UQpy.utilities.Utilities import process_random_state -from UQpy.surrogates.baseclass.Surrogate import Surrogate -from UQpy.utilities.ValidationTypes import RandomStateType -from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import Correlation -from UQpy.surrogates.kriging.regression_models.baseclass.Regression import Regression - - -class Kriging(Surrogate): - @beartype - def __init__( - self, - regression_model: Regression, - correlation_model: Correlation, - correlation_model_parameters: list, - optimizer, - bounds: list = None, - optimize: bool = True, - optimizations_number: int = 1, - normalize: bool = True, - random_state: RandomStateType = None, - ): - """ - Īšriging generates an Gaussian process regression-based surrogate model to predict the model output at new sample - points. - - :param regression_model: `regression_model` specifies and evaluates the basis functions and their coefficients, - which defines the trend of the model. Built-in options: :class:`Constant`, :class:`Linear`, :class:`Quadratic` - :param correlation_model: `correlation_model` specifies and evaluates the correlation function. - Built-in options: :class:`Exponential`, :class:`Gaussian`, :class:`Linear`, :class:`Spherical`, - :class:`Cubic`, :class:`Spline` - :param correlation_model_parameters: List or array of initial values for the correlation model - hyperparameters/scale parameters. - :param bounds: Bounds on the hyperparameters used to solve optimization problem to estimate maximum likelihood - estimator. This should be a closed bound. - Default: :math:`[0.001, 10^7]` for each hyperparameter. - :param optimize: Indicator to solve MLE problem or not. If :any:'True' corr_model_params will be used as initial - solution for optimization problem. Otherwise, correlation_model_parameters will be directly use as the - hyperparamters. - Default: :any:`True`. - :param optimizations_number: Number of times MLE optimization problem is to be solved with a random starting - point. Default: :math:`1`. - :param normalize: Boolean flag used in case data normalization is required. - :param optimizer: Object of the :class:`Optimizer` optimizer used during the Kriging surrogate. - Default: :class:`.MinimizeOptimizer`. - :param random_state: Random seed used to initialize the pseudo-random number generator. If an :any:`int` is - provided, this sets the seed for an object of :class:`numpy.random.RandomState`. Otherwise, the - object itself can be passed directly. 
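For reference while this class is being removed, a construction sketch assembled from the signature above; the concrete regression and correlation class names follow the deleted file names, and the MinimizeOptimizer argument is an assumption not shown in this diff:

from UQpy.surrogates.kriging import Kriging
from UQpy.surrogates.kriging.regression_models import LinearRegression
from UQpy.surrogates.kriging.correlation_models import GaussianCorrelation
from UQpy.utilities import MinimizeOptimizer

K = Kriging(
    regression_model=LinearRegression(),
    correlation_model=GaussianCorrelation(),
    correlation_model_parameters=[1.0],       # one scale parameter per input dimension
    optimizer=MinimizeOptimizer("l-bfgs-b"),  # assumed optimizer setup
)
K.fit(samples=training_samples, values=training_values)  # hypothetical (n, d) and (n, 1) arrays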
- """ - self.regression_model = regression_model - self.correlation_model = correlation_model - self.correlation_model_parameters = np.array(correlation_model_parameters) - self.bounds = bounds - self.optimizer = optimizer - self.optimizations_number = optimizations_number - self.optimize = optimize - self.normalize = normalize - self.logger = logging.getLogger(__name__) - self.random_state = random_state - - # Variables are used outside the __init__ - self.samples = None - self.values = None - self.sample_mean, self.sample_std = None, None - self.value_mean, self.value_std = None, None - self.rmodel, self.cmodel = None, None - self.beta: list = None - """Regression coefficients.""" - self.gamma = None - self.err_var: float = None - """Variance of the Gaussian random process.""" - self.F_dash = None - self.C_inv = None - self.G = None - self.F, self.R = None, None - - if isinstance(self.optimizer, str): - raise ValueError("The optimization function provided a input parameter cannot be None.") - - if optimizer._bounds is None: - optimizer.update_bounds([[0.001, 10 ** 7]] * self.correlation_model_parameters.shape[0]) - - self.jac = optimizer.supports_jacobian() - self.random_state = process_random_state(random_state) - - def fit( - self, - samples, - values, - optimizations_number: int = None, - correlation_model_parameters: list = None, - ): - """ - Fit the surrogate model using the training samples and the corresponding model values. - - The user can run this method multiple time after initiating the :class:`.Kriging` class object. - - This method updates the samples and parameters of the :class:`.Kriging` object. This method uses - `correlation_model_parameters` from previous run as the starting point for MLE problem unless user provides a - new starting point. - - :param samples: :class:`numpy.ndarray` containing the training points. - :param values: :class:`numpy.ndarray` containing the model evaluations at the training points. - :param optimizations_number: number of optimization iterations - :param correlation_model_parameters: List or array of initial values for the correlation model - hyperparameters/scale parameters. - - The :meth:`fit` method has no returns, although it creates the :py:attr:`beta`, :py:attr:`err_var` and - :py:attr:`C_inv` attributes of the :class:`.Kriging` class. 
- """ - self.logger.info("UQpy: Running kriging.fit") - - if optimizations_number is not None: - self.optimizations_number = optimizations_number - if correlation_model_parameters is not None: - self.correlation_model_parameters = np.array(correlation_model_parameters) - self.samples = np.array(samples) - - # Number of samples and dimensions of samples and values - nsamples, input_dim = self.samples.shape - output_dim = int(np.size(values) / nsamples) - - self.values = np.array(values).reshape(nsamples, output_dim) - - # Normalizing the data - if self.normalize: - self.sample_mean, self.sample_std = np.mean(self.samples, 0), np.std(self.samples, 0) - self.value_mean, self.value_std = np.mean(self.values, 0), np.std(self.values, 0) - s_ = (self.samples - self.sample_mean) / self.sample_std - y_ = (self.values - self.value_mean) / self.value_std - else: - s_ = self.samples - y_ = self.values - - self.F, jf_ = self.regression_model.r(s_) - - # Maximum Likelihood Estimation : Solving optimization problem to calculate hyperparameters - if self.optimize: - starting_point = self.correlation_model_parameters - - minimizer, fun_value = np.zeros([self.optimizations_number, input_dim]),\ - np.zeros([self.optimizations_number, 1]) - for i__ in range(self.optimizations_number): - p_ = self.optimizer.optimize(function=Kriging.log_likelihood, - initial_guess=starting_point, - args=(self.correlation_model, s_, self.F, y_, self.jac), - jac=self.jac) - print(p_.success) - # print(self.kwargs_optimizer) - minimizer[i__, :] = p_.x - fun_value[i__, 0] = p_.fun - # Generating new starting points using log-uniform distribution - if i__ != self.optimizations_number - 1: - starting_point = stats.reciprocal.rvs([j[0] for j in self.optimizer._bounds], - [j[1] for j in self.optimizer._bounds], 1, - random_state=self.random_state) - print(starting_point) - - if min(fun_value) == np.inf: - raise NotImplementedError("Maximum likelihood estimator failed: Choose different starting point or " - "increase nopt") - t = np.argmin(fun_value) - self.correlation_model_parameters = minimizer[t, :] - - # Updated Correlation matrix corresponding to MLE estimates of hyperparameters - self.R = self.correlation_model.c(x=s_, s=s_, params=self.correlation_model_parameters) - - self.beta, self.gamma, tmp = self._compute_additional_parameters(self.R) - self.C_inv, self.F_dash, self.G, self.err_var = tmp[1], tmp[3], tmp[2], tmp[5] - - self.logger.info("UQpy: kriging fit complete.") - - def _compute_additional_parameters(self, correlation_matrix): - if self.normalize: - y_ = (self.values - self.value_mean) / self.value_std - else: - y_ = self.values - # Compute the regression coefficient (solving this linear equation: F * beta = Y) - # Eq: 3.8, DACE - c = cholesky(correlation_matrix + (10 + self.samples.shape[0]) * 2 ** (-52) * np.eye(self.samples.shape[0]), - lower=True, check_finite=False) - c_inv = np.linalg.inv(c) - f_dash = np.linalg.solve(c, self.F) - y_dash = np.linalg.solve(c, y_) - q_, g_ = np.linalg.qr(f_dash) # Eq: 3.11, DACE - # Check if F is a full rank matrix - if np.linalg.matrix_rank(g_) != min(np.size(self.F, 0), np.size(self.F, 1)): - raise NotImplementedError("Chosen regression functions are not sufficiently linearly independent") - # Design parameters (beta: regression coefficient) - beta = np.linalg.solve(g_, np.matmul(np.transpose(q_), y_dash)) - - # Design parameter (R * gamma = Y - F * beta = residual) - gamma = np.linalg.solve(c.T, (y_dash - np.matmul(f_dash, beta))) - - # Computing the process variance (Eq: 3.13, 
DACE) - err_var = np.zeros(self.values.shape[1]) - for i in range(self.values.shape[1]): - err_var[i] = (1 / self.samples.shape[0]) * (np.linalg.norm(y_dash[:, i] - - np.matmul(f_dash, beta[:, i])) ** 2) - - return beta, gamma, (c, c_inv, g_, f_dash, y_dash, err_var) - - def predict(self, points: np.ndarray, return_std: bool = False, correlation_model_parameters: list = None): - """ - Predict the model response at new points. - - This method evaluates the regression and correlation model at new sample points. Then, it predicts the function - value and standard deviation. - - :param points: Points at which to predict the model response. - :param return_std: Indicator to estimate standard deviation. - :param correlation_model_parameters: Hyperparameters for correlation model. - :return: Predicted values at the new points, Standard deviation of predicted values at the new points - """ - x_ = np.atleast_2d(points) - if self.normalize: - x_ = (x_ - self.sample_mean) / self.sample_std - s_ = (self.samples - self.sample_mean) / self.sample_std - else: - s_ = self.samples - fx, jf = self.regression_model.r(x_) - if correlation_model_parameters is None: - correlation_model_parameters = self.correlation_model_parameters - rx = self.correlation_model.c( - x=x_, s=s_, params=correlation_model_parameters - ) - if correlation_model_parameters is None: - beta, gamma = self.beta, self.gamma - c_inv, f_dash, g_, err_var = self.C_inv, self.F_dash, self.G, self.err_var - else: - beta, gamma, tmp = self._compute_additional_parameters( - self.correlation_model.c(x=s_, s=s_, params=correlation_model_parameters)) - c_inv, f_dash, g_, err_var = tmp[1], tmp[3], tmp[2], tmp[5] - y = np.einsum("ij,jk->ik", fx, beta) + np.einsum( - "ij,jk->ik", rx, gamma - ) - if self.normalize: - y = self.value_mean + y * self.value_std - if x_.shape[1] == 1: - y = y.flatten() - if return_std: - r_dash = np.matmul(c_inv, rx.T) - u = np.matmul(f_dash.T, r_dash) - fx.T - norm1 = np.linalg.norm(r_dash, 2, 0) - norm2 = np.linalg.norm(np.linalg.solve(g_, u), 2, 0) - mse = np.sqrt(err_var * np.atleast_2d(1 + norm2 - norm1).T) - if self.normalize: - mse = self.value_std * mse - if x_.shape[1] == 1: - mse = mse.flatten() - return y, mse - else: - return y - - def jacobian(self, points: np.ndarray): - """ - Predict the gradient of the model at new points. - - This method evaluates the regression and correlation model at new sample point. Then, it predicts the gradient - using the regression coefficients and the training second_order_tensor. - - :param points: Points at which to evaluate the gradient. - :return: Gradient of the surrogate model evaluated at the new points. - """ - x_ = np.atleast_2d(points) - if self.normalize: - x_ = (x_ - self.sample_mean) / self.sample_std - s_ = (self.samples - self.sample_mean) / self.sample_std - else: - s_ = self.samples - - fx, jf = self.regression_model.r(x_) - rx, drdx = self.correlation_model.c( - x=x_, s=s_, params=self.correlation_model_parameters, dx=True - ) - y_grad = np.einsum("ikj,jm->ik", jf, self.beta) + np.einsum( - "ijk,jm->ki", drdx.T, self.gamma - ) - if self.normalize: - y_grad = y_grad * self.value_std / self.sample_std - if x_.shape[1] == 1: - y_grad = y_grad.flatten() - return y_grad - - @staticmethod - def log_likelihood(p0, cm, s, f, y, return_grad): - # Return the log-likelihood function and it's gradient. 
Gradient is calculate using Central Difference - m = s.shape[0] - n = s.shape[1] - r__, dr_ = cm.c(x=s, s=s, params=p0, dt=True) - try: - cc = cholesky(r__ + 2 ** (-52) * np.eye(m), lower=True) - except np.linalg.LinAlgError: - return np.inf, np.zeros(n) - - # Product of diagonal terms is negligible sometimes, even when cc exists. - if np.prod(np.diagonal(cc)) == 0: - return np.inf, np.zeros(n) - - cc_inv = np.linalg.inv(cc) - r_inv = np.matmul(cc_inv.T, cc_inv) - f__ = cc_inv.dot(f) - y__ = cc_inv.dot(y) - - q__, g__ = np.linalg.qr(f__) # Eq: 3.11, DACE - - # Check if F is a full rank matrix - if np.linalg.matrix_rank(g__) != min(np.size(f__, 0), np.size(f__, 1)): - raise NotImplementedError( - "Chosen regression functions are not sufficiently linearly independent" - ) - - # Design parameters - beta_ = np.linalg.solve(g__, np.matmul(np.transpose(q__), y__)) - - # Computing the process variance (Eq: 3.13, DACE) - sigma_ = np.zeros(y.shape[1]) - - ll = 0 - for out_dim in range(y.shape[1]): - sigma_[out_dim] = (1 / m) * ( - np.linalg.norm(y__[:, out_dim] - np.matmul(f__, beta_[:, out_dim])) ** 2) - # Objective function:= log(det(sigma**2 * R)) + constant - ll = (ll + ( np.log(np.linalg.det(sigma_[out_dim] * r__)) + m * (np.log(2 * np.pi) + 1)) / 2) - - # Gradient of loglikelihood - # Reference: C. E. Rasmussen & C. K. I. Williams, Gaussian Processes for Machine Learning, the MIT Press, - # 2006, ISBN 026218253X. (Page 114, Eq.(5.9)) - residual = y - np.matmul(f, beta_) - gamma = np.matmul(r_inv, residual) - grad_mle = np.zeros(n) - for in_dim in range(n): - r_inv_derivative = np.matmul(r_inv, np.matmul(dr_[:, :, in_dim], r_inv)) - tmp = np.matmul(residual.T, np.matmul(r_inv_derivative, residual)) - for out_dim in range(y.shape[1]): - alpha = gamma / sigma_[out_dim] - tmp1 = np.matmul(alpha, alpha.T) - r_inv / sigma_[out_dim] - cov_der = sigma_[out_dim] * dr_[:, :, in_dim] + tmp * r__ / m - grad_mle[in_dim] = grad_mle[in_dim] - 0.5 * np.trace( - np.matmul(tmp1, cov_der) - ) - - if return_grad: - return ll, grad_mle - else: - return ll diff --git a/src/UQpy/surrogates/kriging/__init__.py b/src/UQpy/surrogates/kriging/__init__.py deleted file mode 100644 index 55a50199d..000000000 --- a/src/UQpy/surrogates/kriging/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from UQpy.surrogates.kriging.Kriging import Kriging - -from UQpy.surrogates.kriging.regression_models import * -from UQpy.surrogates.kriging.correlation_models import * diff --git a/src/UQpy/surrogates/kriging/correlation_models/CubicCorrelation.py b/src/UQpy/surrogates/kriging/correlation_models/CubicCorrelation.py deleted file mode 100644 index 3e909507f..000000000 --- a/src/UQpy/surrogates/kriging/correlation_models/CubicCorrelation.py +++ /dev/null @@ -1,28 +0,0 @@ -from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import * - - -class CubicCorrelation(Correlation): - def c(self, x, s, params, dt=False, dx=False): - zeta_matrix, dtheta_derivs, dx_derivs = Correlation.derivatives( - x_=x, s_=s, params=params - ) - # Initial matrices containing derivates for all values in array. 
Note since - # dtheta_s and dx_s already accounted for where derivative should be zero, all - # that must be done is multiplying the |dij| or thetaj matrix on top of a - # matrix of derivates w.r.t zeta (in this case, dzeta = -6zeta+6zeta**2) - drdt = (-6 * zeta_matrix + 6 * zeta_matrix ** 2) * dtheta_derivs - drdx = (-6 * zeta_matrix + 6 * zeta_matrix ** 2) * dx_derivs - # Also, create matrix for values of equation, 1 - 3zeta**2 + 2zeta**3, for loop - zeta_function_cubic = 1 - 3 * zeta_matrix ** 2 + 2 * zeta_matrix ** 3 - rx = np.prod(zeta_function_cubic, 2) - if dt: - # Same as previous example, loop over zeta matrix by shifting index - for i in range(len(params) - 1): - drdt = drdt * np.roll(zeta_function_cubic, i + 1, axis=2) - return rx, drdt - if dx: - # Same as previous example, loop over zeta matrix by shifting index - for i in range(len(params) - 1): - drdx = drdx * np.roll(zeta_function_cubic, i + 1, axis=2) - return rx, drdx - return rx diff --git a/src/UQpy/surrogates/kriging/correlation_models/ExponentialCorrelation.py b/src/UQpy/surrogates/kriging/correlation_models/ExponentialCorrelation.py deleted file mode 100644 index 94702760c..000000000 --- a/src/UQpy/surrogates/kriging/correlation_models/ExponentialCorrelation.py +++ /dev/null @@ -1,20 +0,0 @@ -from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import * - - -class ExponentialCorrelation(Correlation): - def c(self, x, s, params, dt=False, dx=False): - stack = Correlation.check_samples_and_return_stack(x, s) - rx = np.exp(np.sum(-params * abs(stack), axis=2)) - if dt: - drdt = -abs(stack) * np.transpose( - np.tile(rx, (np.size(x, 1), 1, 1)), (1, 2, 0) - ) - return rx, drdt - if dx: - drdx = ( - -params - * np.sign(stack) - * np.transpose(np.tile(rx, (np.size(x, 1), 1, 1)), (1, 2, 0)) - ) - return rx, drdx - return rx diff --git a/src/UQpy/surrogates/kriging/correlation_models/GaussianCorrelation.py b/src/UQpy/surrogates/kriging/correlation_models/GaussianCorrelation.py deleted file mode 100644 index 05ce09830..000000000 --- a/src/UQpy/surrogates/kriging/correlation_models/GaussianCorrelation.py +++ /dev/null @@ -1,21 +0,0 @@ -from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import * - - -class GaussianCorrelation(Correlation): - def c(self, x, s, params, dt=False, dx=False): - stack = Correlation.check_samples_and_return_stack(x, s) - rx = np.exp(np.sum(-params * (stack ** 2), axis=2)) - if dt: - drdt = -(stack ** 2) * np.transpose( - np.tile(rx, (np.size(x, 1), 1, 1)), (1, 2, 0) - ) - return rx, drdt - if dx: - drdx = ( - -2 - * params - * stack - * np.transpose(np.tile(rx, (np.size(x, 1), 1, 1)), (1, 2, 0)) - ) - return rx, drdx - return rx diff --git a/src/UQpy/surrogates/kriging/correlation_models/LinearCorrelation.py b/src/UQpy/surrogates/kriging/correlation_models/LinearCorrelation.py deleted file mode 100644 index 69d7f1506..000000000 --- a/src/UQpy/surrogates/kriging/correlation_models/LinearCorrelation.py +++ /dev/null @@ -1,35 +0,0 @@ -from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import * - - -class LinearCorrelation(Correlation): - def c(self, x, s, params, dt=False, dx=False): - stack = Correlation.check_samples_and_return_stack(x, s) - # Taking stack and turning each d value into 1-theta*dij - after_parameters = 1 - params * abs(stack) - # Define matrix of zeros to compare against (not necessary to be defined separately, - # but the line is bulky if this isn't defined first, and it is used more than once) - comp_zero = np.zeros((np.size(x, 0), 
np.size(s, 0), np.size(s, 1))) - # Compute matrix of max{0,1-theta*d} - max_matrix = np.maximum(after_parameters, comp_zero) - rx = np.prod(max_matrix, 2) - # Create matrix that has 1s where max_matrix is nonzero - # -Essentially, this acts as a way to store the indices of where the values are nonzero - ones_and_zeros = max_matrix.astype(bool).astype(int) - # Set initial derivatives as if all were positive - first_dtheta = -abs(stack) - first_dx = np.negative(params) * np.sign(stack) - # Multiply derivs by ones_and_zeros...this will set the values where the - # derivative should be zero to zero, and keep all other values the same - drdt = np.multiply(first_dtheta, ones_and_zeros) - drdx = np.multiply(first_dx, ones_and_zeros) - if dt: - # Loop over parameters, shifting max_matrix and multiplying over derivative matrix with each iter - for i in range(len(params) - 1): - drdt = drdt * np.roll(max_matrix, i + 1, axis=2) - return rx, drdt - if dx: - # Loop over parameters, shifting max_matrix and multiplying over derivative matrix with each iter - for i in range(len(params) - 1): - drdx = drdx * np.roll(max_matrix, i + 1, axis=2) - return rx, drdx - return rx diff --git a/src/UQpy/surrogates/kriging/correlation_models/SphericalCorrelation.py b/src/UQpy/surrogates/kriging/correlation_models/SphericalCorrelation.py deleted file mode 100644 index 1f6b8173d..000000000 --- a/src/UQpy/surrogates/kriging/correlation_models/SphericalCorrelation.py +++ /dev/null @@ -1,28 +0,0 @@ -from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import * - - -class SphericalCorrelation(Correlation): - def c(self, x, s, params, dt=False, dx=False): - zeta_matrix, dtheta_derivs, dx_derivs = Correlation.derivatives( - x_=x, s_=s, params=params - ) - # Initial matrices containing derivates for all values in array. 
Note since - # dtheta_s and dx_s already accounted for where derivative should be zero, all - # that must be done is multiplying the |dij| or thetaj matrix on top of a - # matrix of derivates w.r.t zeta (in this case, dzeta = -1.5+1.5zeta**2) - drdt = (-1.5 + 1.5 * zeta_matrix ** 2) * dtheta_derivs - drdx = (-1.5 + 1.5 * zeta_matrix ** 2) * dx_derivs - # Also, create matrix for values of equation, 1 - 1.5zeta + 0.5zeta**3, for loop - zeta_function = 1 - 1.5 * zeta_matrix + 0.5 * zeta_matrix ** 3 - rx = np.prod(zeta_function, 2) - if dt: - # Same as previous example, loop over zeta matrix by shifting index - for i in range(len(params) - 1): - drdt = drdt * np.roll(zeta_function, i + 1, axis=2) - return rx, drdt - if dx: - # Same as previous example, loop over zeta matrix by shifting index - for i in range(len(params) - 1): - drdx = drdx * np.roll(zeta_function, i + 1, axis=2) - return rx, drdx - return rx diff --git a/src/UQpy/surrogates/kriging/correlation_models/SplineCorrelation.py b/src/UQpy/surrogates/kriging/correlation_models/SplineCorrelation.py deleted file mode 100644 index 0aa6282d1..000000000 --- a/src/UQpy/surrogates/kriging/correlation_models/SplineCorrelation.py +++ /dev/null @@ -1,58 +0,0 @@ -from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import * - - -class SplineCorrelation(Correlation): - def c(self, x, s, params, dt=False, dx=False): - # x_, s_ = np.atleast_2d(x_), np.atleast_2d(s_) - # # Create stack matrix, where each block is x_i with all s - # stack = np.tile(np.swapaxes(np.atleast_3d(x_), 1, 2), (1, np.size(s_, 0), 1)) - np.tile(s_, ( - # np.size(x_, 0), - # 1, 1)) - stack = Correlation.check_samples_and_return_stack(x, s) - # In this case, the zeta value is just abs(stack)*parameters, no comparison - zeta_matrix = abs(stack) * params - # So, dtheta and dx are just |dj| and theta*sgn(dj), respectively - dtheta_derivs = abs(stack) - # dx_derivs = np.ones((np.size(x,0),np.size(s,0),np.size(s,1)))*parameters - dx_derivs = np.sign(stack) * params - - # Initialize empty sigma and dsigma matrices - sigma = np.ones( - (zeta_matrix.shape[0], zeta_matrix.shape[1], zeta_matrix.shape[2]) - ) - dsigma = np.ones( - (zeta_matrix.shape[0], zeta_matrix.shape[1], zeta_matrix.shape[2]) - ) - - # Loop over cases to create zeta_matrix and subsequent dR matrices - for i in range(zeta_matrix.shape[0]): - for j in range(zeta_matrix.shape[1]): - for k in range(zeta_matrix.shape[2]): - y = zeta_matrix[i, j, k] - if 0 <= y <= 0.2: - sigma[i, j, k] = 1 - 15 * y ** 2 + 30 * y ** 3 - dsigma[i, j, k] = -30 * y + 90 * y ** 2 - elif 0.2 < y < 1.0: - sigma[i, j, k] = 1.25 * (1 - y) ** 3 - dsigma[i, j, k] = 3.75 * (1 - y) ** 2 * -1 - elif y >= 1: - sigma[i, j, k] = 0 - dsigma[i, j, k] = 0 - - rx = np.prod(sigma, 2) - - if dt: - # Initialize derivative matrices incorporating chain rule - drdt = dsigma * dtheta_derivs - # Loop over to create proper matrices - for i in range(len(params) - 1): - drdt = drdt * np.roll(sigma, i + 1, axis=2) - return rx, drdt - if dx: - # Initialize derivative matrices incorporating chain rule - drdx = dsigma * dx_derivs - # Loop over to create proper matrices - for i in range(len(params) - 1): - drdx = drdx * np.roll(sigma, i + 1, axis=2) - return rx, drdx - return rx diff --git a/src/UQpy/surrogates/kriging/correlation_models/__init__.py b/src/UQpy/surrogates/kriging/correlation_models/__init__.py deleted file mode 100644 index 10f39dafc..000000000 --- a/src/UQpy/surrogates/kriging/correlation_models/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from 
UQpy.surrogates.kriging.correlation_models.baseclass import * -from UQpy.surrogates.kriging.correlation_models.CubicCorrelation import CubicCorrelation -from UQpy.surrogates.kriging.correlation_models.ExponentialCorrelation import ExponentialCorrelation -from UQpy.surrogates.kriging.correlation_models.GaussianCorrelation import GaussianCorrelation -from UQpy.surrogates.kriging.correlation_models.LinearCorrelation import LinearCorrelation -from UQpy.surrogates.kriging.correlation_models.SphericalCorrelation import SphericalCorrelation -from UQpy.surrogates.kriging.correlation_models.SplineCorrelation import SplineCorrelation diff --git a/src/UQpy/surrogates/kriging/correlation_models/baseclass/Correlation.py b/src/UQpy/surrogates/kriging/correlation_models/baseclass/Correlation.py deleted file mode 100644 index 703461b5f..000000000 --- a/src/UQpy/surrogates/kriging/correlation_models/baseclass/Correlation.py +++ /dev/null @@ -1,47 +0,0 @@ -from abc import ABC, abstractmethod -import numpy as np - - -class Correlation(ABC): - """ - Abstract base class of all Correlations. Serves as a template for creating new Kriging correlation - functions. - """ - - @abstractmethod - def c(self, x, s, params, dt=False, dx=False): - """ - Abstract method that needs to be implemented by the user when creating a new Correlation function. - """ - pass - - @staticmethod - def check_samples_and_return_stack(x, s): - x_, s_ = np.atleast_2d(x), np.atleast_2d(s) - # Create stack matrix, where each block is x_i with all s - stack = np.tile( - np.swapaxes(np.atleast_3d(x_), 1, 2), (1, np.size(s_, 0), 1) - ) - np.tile(s_, (np.size(x_, 0), 1, 1)) - return stack - - @staticmethod - def derivatives(x_, s_, params): - stack = Correlation.check_samples_and_return_stack(x_, s_) - # Taking stack and creating array of all thetaj*dij - after_parameters = params * abs(stack) - # Create matrix of all ones to compare - comp_ones = np.ones((np.size(x_, 0), np.size(s_, 0), np.size(s_, 1))) - # zeta_matrix has all values min{1,theta*dij} - zeta_matrix_ = np.minimum(after_parameters, comp_ones) - # Copy zeta_matrix to another matrix that will used to find where derivative should be zero - indices = zeta_matrix_.copy() - # If value of min{1,theta*dij} is 1, the derivative should be 0. 
- # So, replace all values of 1 with 0, then perform the .astype(bool).astype(int) - # operation like in the linear example, so you end up with an array of 1's where - # the derivative should be caluclated and 0 where it should be zero - indices[indices == 1] = 0 - # Create matrix of all |dij| (where non zero) to be used in calculation of dR/dtheta - dtheta_derivs_ = indices.astype(bool).astype(int) * abs(stack) - # Same as above, but for matrix of all thetaj where non-zero - dx_derivs_ = indices.astype(bool).astype(int) * params * np.sign(stack) - return zeta_matrix_, dtheta_derivs_, dx_derivs_ diff --git a/src/UQpy/surrogates/kriging/correlation_models/baseclass/__init__.py b/src/UQpy/surrogates/kriging/correlation_models/baseclass/__init__.py deleted file mode 100644 index e8cf1815d..000000000 --- a/src/UQpy/surrogates/kriging/correlation_models/baseclass/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import Correlation diff --git a/src/UQpy/surrogates/kriging/regression_models/ConstantRegression.py b/src/UQpy/surrogates/kriging/regression_models/ConstantRegression.py deleted file mode 100644 index 0e4f9e984..000000000 --- a/src/UQpy/surrogates/kriging/regression_models/ConstantRegression.py +++ /dev/null @@ -1,10 +0,0 @@ -import numpy as np -from UQpy.surrogates.kriging.regression_models.baseclass.Regression import Regression - - -class ConstantRegression(Regression): - def r(self, s): - s = np.atleast_2d(s) - fx = np.ones([np.size(s, 0), 1]) - jf = np.zeros([np.size(s, 0), np.size(s, 1), 1]) - return fx, jf diff --git a/src/UQpy/surrogates/kriging/regression_models/LinearRegression.py b/src/UQpy/surrogates/kriging/regression_models/LinearRegression.py deleted file mode 100644 index 118d8d73c..000000000 --- a/src/UQpy/surrogates/kriging/regression_models/LinearRegression.py +++ /dev/null @@ -1,12 +0,0 @@ -import numpy as np -from UQpy.surrogates.kriging.regression_models.baseclass.Regression import Regression - - -class LinearRegression(Regression): - def r(self, s): - s = np.atleast_2d(s) - fx = np.concatenate((np.ones([np.size(s, 0), 1]), s), 1) - jf_b = np.zeros([np.size(s, 0), np.size(s, 1), np.size(s, 1)]) - np.einsum("jii->ji", jf_b)[:] = 1 - jf = np.concatenate((np.zeros([np.size(s, 0), np.size(s, 1), 1]), jf_b), 2) - return fx, jf diff --git a/src/UQpy/surrogates/kriging/regression_models/QuadraticRegression.py b/src/UQpy/surrogates/kriging/regression_models/QuadraticRegression.py deleted file mode 100644 index fdddefbb5..000000000 --- a/src/UQpy/surrogates/kriging/regression_models/QuadraticRegression.py +++ /dev/null @@ -1,37 +0,0 @@ -import numpy as np -from UQpy.surrogates.kriging.regression_models.baseclass.Regression import Regression - - -class QuadraticRegression(Regression): - def r(self, s): - s = np.atleast_2d(s) - fx = np.zeros( - [np.size(s, 0), int((np.size(s, 1) + 1) * (np.size(s, 1) + 2) / 2)] - ) - jf = np.zeros( - [ - np.size(s, 0), - np.size(s, 1), - int((np.size(s, 1) + 1) * (np.size(s, 1) + 2) / 2), - ] - ) - for i in range(np.size(s, 0)): - temp = np.hstack((1, s[i, :])) - for j in range(np.size(s, 1)): - temp = np.hstack((temp, s[i, j] * s[i, j::])) - fx[i, :] = temp - # definie H matrix - h_ = 0 - for j in range(np.size(s, 1)): - tmp_ = s[i, j] * np.eye(np.size(s, 1)) - t1 = np.zeros([np.size(s, 1), np.size(s, 1)]) - t1[j, :] = s[i, :] - tmp = tmp_ + t1 - if j == 0: - h_ = tmp[:, j::] - else: - h_ = np.hstack((h_, tmp[:, j::])) - jf[i, :, :] = np.hstack( - (np.zeros([np.size(s, 1), 1]), 
np.eye(np.size(s, 1)), h_) - ) - return fx, jf diff --git a/src/UQpy/surrogates/kriging/regression_models/__init__.py b/src/UQpy/surrogates/kriging/regression_models/__init__.py deleted file mode 100644 index e6da265b3..000000000 --- a/src/UQpy/surrogates/kriging/regression_models/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from UQpy.surrogates.kriging.regression_models.baseclass import * -from UQpy.surrogates.kriging.regression_models.ConstantRegression import ConstantRegression -from UQpy.surrogates.kriging.regression_models.LinearRegression import LinearRegression -from UQpy.surrogates.kriging.regression_models.QuadraticRegression import QuadraticRegression diff --git a/src/UQpy/surrogates/kriging/regression_models/baseclass/Regression.py b/src/UQpy/surrogates/kriging/regression_models/baseclass/Regression.py deleted file mode 100644 index 3d435e51d..000000000 --- a/src/UQpy/surrogates/kriging/regression_models/baseclass/Regression.py +++ /dev/null @@ -1,14 +0,0 @@ -from abc import ABC, abstractmethod - - -class Regression(ABC): - """ - Abstract base class of all Regressions. Serves as a template for creating new Kriging regression - functions. - """ - @abstractmethod - def r(self, s): - """ - Abstract method that needs to be implemented by the user when creating a new Regression function. - """ - pass diff --git a/src/UQpy/surrogates/kriging/regression_models/baseclass/__init__.py b/src/UQpy/surrogates/kriging/regression_models/baseclass/__init__.py deleted file mode 100644 index 004e95e23..000000000 --- a/src/UQpy/surrogates/kriging/regression_models/baseclass/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from UQpy.surrogates.kriging.regression_models.baseclass.Regression import Regression From bba0a67f2fbb8f094e3e2dcc6c56238d6c25361d Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Thu, 20 Oct 2022 15:18:39 -0400 Subject: [PATCH 74/88] Revert "Delete src/UQpy/surrogates/kriging directory" This reverts commit a79da949517463543e37d532a3e84fb9fe6cd52b. 
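For context, a minimal usage sketch of the restored class (illustrative only: the training data are synthetic,
and the `method` passed to `MinimizeOptimizer` is an assumption rather than part of this patch):

    import numpy as np
    from UQpy.utilities import MinimizeOptimizer
    from UQpy.surrogates.kriging import Kriging
    from UQpy.surrogates.kriging.regression_models import LinearRegression
    from UQpy.surrogates.kriging.correlation_models import GaussianCorrelation

    # Synthetic 1-D training data (illustrative)
    x_train = np.linspace(0.0, 1.0, 10).reshape(-1, 1)
    y_train = np.sin(2.0 * np.pi * x_train)

    # Default hyperparameter bounds [0.001, 1e7] are set by the class when none are given
    surrogate = Kriging(
        regression_model=LinearRegression(),
        correlation_model=GaussianCorrelation(),
        correlation_model_parameters=[1.0],
        optimizer=MinimizeOptimizer(method="l-bfgs-b"),
    )
    surrogate.fit(samples=x_train, values=y_train)

    # Predicted mean and standard deviation at new points
    y_hat, y_std = surrogate.predict(np.array([[0.25], [0.75]]), return_std=True)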
---
 src/UQpy/surrogates/kriging/Kriging.py | 354 ++++++++++++++++++
 src/UQpy/surrogates/kriging/__init__.py | 4 +
 .../correlation_models/CubicCorrelation.py | 28 ++
 .../ExponentialCorrelation.py | 20 +
 .../correlation_models/GaussianCorrelation.py | 21 ++
 .../correlation_models/LinearCorrelation.py | 35 ++
 .../SphericalCorrelation.py | 28 ++
 .../correlation_models/SplineCorrelation.py | 58 +++
 .../kriging/correlation_models/__init__.py | 7 +
 .../baseclass/Correlation.py | 47 +++
 .../correlation_models/baseclass/__init__.py | 1 +
 .../regression_models/ConstantRegression.py | 10 +
 .../regression_models/LinearRegression.py | 12 +
 .../regression_models/QuadraticRegression.py | 37 ++
 .../kriging/regression_models/__init__.py | 4 +
 .../regression_models/baseclass/Regression.py | 14 +
 .../regression_models/baseclass/__init__.py | 1 +
 17 files changed, 681 insertions(+)
 create mode 100755 src/UQpy/surrogates/kriging/Kriging.py
 create mode 100644 src/UQpy/surrogates/kriging/__init__.py
 create mode 100644 src/UQpy/surrogates/kriging/correlation_models/CubicCorrelation.py
 create mode 100644 src/UQpy/surrogates/kriging/correlation_models/ExponentialCorrelation.py
 create mode 100644 src/UQpy/surrogates/kriging/correlation_models/GaussianCorrelation.py
 create mode 100644 src/UQpy/surrogates/kriging/correlation_models/LinearCorrelation.py
 create mode 100644 src/UQpy/surrogates/kriging/correlation_models/SphericalCorrelation.py
 create mode 100644 src/UQpy/surrogates/kriging/correlation_models/SplineCorrelation.py
 create mode 100644 src/UQpy/surrogates/kriging/correlation_models/__init__.py
 create mode 100644 src/UQpy/surrogates/kriging/correlation_models/baseclass/Correlation.py
 create mode 100644 src/UQpy/surrogates/kriging/correlation_models/baseclass/__init__.py
 create mode 100644 src/UQpy/surrogates/kriging/regression_models/ConstantRegression.py
 create mode 100644 src/UQpy/surrogates/kriging/regression_models/LinearRegression.py
 create mode 100644 src/UQpy/surrogates/kriging/regression_models/QuadraticRegression.py
 create mode 100644 src/UQpy/surrogates/kriging/regression_models/__init__.py
 create mode 100644 src/UQpy/surrogates/kriging/regression_models/baseclass/Regression.py
 create mode 100644 src/UQpy/surrogates/kriging/regression_models/baseclass/__init__.py
diff --git a/src/UQpy/surrogates/kriging/Kriging.py b/src/UQpy/surrogates/kriging/Kriging.py
new file mode 100755
index 000000000..19cbfaf1d
--- /dev/null
+++ b/src/UQpy/surrogates/kriging/Kriging.py
@@ -0,0 +1,354 @@
+import logging
+from typing import Callable
+
+import numpy as np
+from scipy.linalg import cholesky
+import scipy.stats as stats
+from beartype import beartype
+
+from UQpy.utilities import MinimizeOptimizer
+from UQpy.utilities.Utilities import process_random_state
+from UQpy.surrogates.baseclass.Surrogate import Surrogate
+from UQpy.utilities.ValidationTypes import RandomStateType
+from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import Correlation
+from UQpy.surrogates.kriging.regression_models.baseclass.Regression import Regression
+
+
+class Kriging(Surrogate):
+    @beartype
+    def __init__(
+        self,
+        regression_model: Regression,
+        correlation_model: Correlation,
+        correlation_model_parameters: list,
+        optimizer,
+        bounds: list = None,
+        optimize: bool = True,
+        optimizations_number: int = 1,
+        normalize: bool = True,
+        random_state: RandomStateType = None,
+    ):
+        """
+        Kriging generates a Gaussian process regression-based surrogate model to predict the model output at new
+        sample points.
+
+        :param regression_model: `regression_model` specifies and evaluates the basis functions and their
+         coefficients, which define the trend of the model. Built-in options: :class:`Constant`, :class:`Linear`,
+         :class:`Quadratic`
+        :param correlation_model: `correlation_model` specifies and evaluates the correlation function.
+         Built-in options: :class:`Exponential`, :class:`Gaussian`, :class:`Linear`, :class:`Spherical`,
+         :class:`Cubic`, :class:`Spline`
+        :param correlation_model_parameters: List or array of initial values for the correlation model
+         hyperparameters/scale parameters.
+        :param bounds: Bounds on the hyperparameters used to solve the optimization problem for the maximum
+         likelihood estimator. This should be a closed bound.
+         Default: :math:`[0.001, 10^7]` for each hyperparameter.
+        :param optimize: Indicator of whether to solve the MLE problem. If :any:`True`,
+         `correlation_model_parameters` is used as the initial solution for the optimization problem. Otherwise,
+         `correlation_model_parameters` is used directly as the hyperparameters.
+         Default: :any:`True`.
+        :param optimizations_number: Number of times the MLE optimization problem is to be solved with a random
+         starting point. Default: :math:`1`.
+        :param normalize: Boolean flag used in case data normalization is required.
+        :param optimizer: Object of the :class:`Optimizer` class used to fit the Kriging surrogate.
+         Default: :class:`.MinimizeOptimizer`.
+        :param random_state: Random seed used to initialize the pseudo-random number generator. If an :any:`int` is
+         provided, this sets the seed for an object of :class:`numpy.random.RandomState`. Otherwise, the
+         object itself can be passed directly.
+        """
+        self.regression_model = regression_model
+        self.correlation_model = correlation_model
+        self.correlation_model_parameters = np.array(correlation_model_parameters)
+        self.bounds = bounds
+        self.optimizer = optimizer
+        self.optimizations_number = optimizations_number
+        self.optimize = optimize
+        self.normalize = normalize
+        self.logger = logging.getLogger(__name__)
+        self.random_state = random_state
+
+        # Variables used outside the __init__
+        self.samples = None
+        self.values = None
+        self.sample_mean, self.sample_std = None, None
+        self.value_mean, self.value_std = None, None
+        self.rmodel, self.cmodel = None, None
+        self.beta: list = None
+        """Regression coefficients."""
+        self.gamma = None
+        self.err_var: float = None
+        """Variance of the Gaussian random process."""
+        self.F_dash = None
+        self.C_inv = None
+        self.G = None
+        self.F, self.R = None, None
+
+        if isinstance(self.optimizer, str):
+            raise ValueError("UQpy: The optimizer must be provided as an Optimizer object, not as a string.")
+
+        if optimizer._bounds is None:
+            optimizer.update_bounds([[0.001, 10 ** 7]] * self.correlation_model_parameters.shape[0])
+
+        self.jac = optimizer.supports_jacobian()
+        self.random_state = process_random_state(random_state)
+
+    def fit(
+        self,
+        samples,
+        values,
+        optimizations_number: int = None,
+        correlation_model_parameters: list = None,
+    ):
+        """
+        Fit the surrogate model using the training samples and the corresponding model values.
+
+        The user can run this method multiple times after initiating the :class:`.Kriging` class object.
+
+        This method updates the samples and parameters of the :class:`.Kriging` object. It uses
+        `correlation_model_parameters` from the previous run as the starting point for the MLE problem unless the
+        user provides a new starting point.
+
+        :param samples: :class:`numpy.ndarray` containing the training points.
+ :param values: :class:`numpy.ndarray` containing the model evaluations at the training points. + :param optimizations_number: number of optimization iterations + :param correlation_model_parameters: List or array of initial values for the correlation model + hyperparameters/scale parameters. + + The :meth:`fit` method has no returns, although it creates the :py:attr:`beta`, :py:attr:`err_var` and + :py:attr:`C_inv` attributes of the :class:`.Kriging` class. + """ + self.logger.info("UQpy: Running kriging.fit") + + if optimizations_number is not None: + self.optimizations_number = optimizations_number + if correlation_model_parameters is not None: + self.correlation_model_parameters = np.array(correlation_model_parameters) + self.samples = np.array(samples) + + # Number of samples and dimensions of samples and values + nsamples, input_dim = self.samples.shape + output_dim = int(np.size(values) / nsamples) + + self.values = np.array(values).reshape(nsamples, output_dim) + + # Normalizing the data + if self.normalize: + self.sample_mean, self.sample_std = np.mean(self.samples, 0), np.std(self.samples, 0) + self.value_mean, self.value_std = np.mean(self.values, 0), np.std(self.values, 0) + s_ = (self.samples - self.sample_mean) / self.sample_std + y_ = (self.values - self.value_mean) / self.value_std + else: + s_ = self.samples + y_ = self.values + + self.F, jf_ = self.regression_model.r(s_) + + # Maximum Likelihood Estimation : Solving optimization problem to calculate hyperparameters + if self.optimize: + starting_point = self.correlation_model_parameters + + minimizer, fun_value = np.zeros([self.optimizations_number, input_dim]),\ + np.zeros([self.optimizations_number, 1]) + for i__ in range(self.optimizations_number): + p_ = self.optimizer.optimize(function=Kriging.log_likelihood, + initial_guess=starting_point, + args=(self.correlation_model, s_, self.F, y_, self.jac), + jac=self.jac) + print(p_.success) + # print(self.kwargs_optimizer) + minimizer[i__, :] = p_.x + fun_value[i__, 0] = p_.fun + # Generating new starting points using log-uniform distribution + if i__ != self.optimizations_number - 1: + starting_point = stats.reciprocal.rvs([j[0] for j in self.optimizer._bounds], + [j[1] for j in self.optimizer._bounds], 1, + random_state=self.random_state) + print(starting_point) + + if min(fun_value) == np.inf: + raise NotImplementedError("Maximum likelihood estimator failed: Choose different starting point or " + "increase nopt") + t = np.argmin(fun_value) + self.correlation_model_parameters = minimizer[t, :] + + # Updated Correlation matrix corresponding to MLE estimates of hyperparameters + self.R = self.correlation_model.c(x=s_, s=s_, params=self.correlation_model_parameters) + + self.beta, self.gamma, tmp = self._compute_additional_parameters(self.R) + self.C_inv, self.F_dash, self.G, self.err_var = tmp[1], tmp[3], tmp[2], tmp[5] + + self.logger.info("UQpy: kriging fit complete.") + + def _compute_additional_parameters(self, correlation_matrix): + if self.normalize: + y_ = (self.values - self.value_mean) / self.value_std + else: + y_ = self.values + # Compute the regression coefficient (solving this linear equation: F * beta = Y) + # Eq: 3.8, DACE + c = cholesky(correlation_matrix + (10 + self.samples.shape[0]) * 2 ** (-52) * np.eye(self.samples.shape[0]), + lower=True, check_finite=False) + c_inv = np.linalg.inv(c) + f_dash = np.linalg.solve(c, self.F) + y_dash = np.linalg.solve(c, y_) + q_, g_ = np.linalg.qr(f_dash) # Eq: 3.11, DACE + # Check if F is a full rank matrix + if 
np.linalg.matrix_rank(g_) != min(np.size(self.F, 0), np.size(self.F, 1)): + raise NotImplementedError("Chosen regression functions are not sufficiently linearly independent") + # Design parameters (beta: regression coefficient) + beta = np.linalg.solve(g_, np.matmul(np.transpose(q_), y_dash)) + + # Design parameter (R * gamma = Y - F * beta = residual) + gamma = np.linalg.solve(c.T, (y_dash - np.matmul(f_dash, beta))) + + # Computing the process variance (Eq: 3.13, DACE) + err_var = np.zeros(self.values.shape[1]) + for i in range(self.values.shape[1]): + err_var[i] = (1 / self.samples.shape[0]) * (np.linalg.norm(y_dash[:, i] - + np.matmul(f_dash, beta[:, i])) ** 2) + + return beta, gamma, (c, c_inv, g_, f_dash, y_dash, err_var) + + def predict(self, points: np.ndarray, return_std: bool = False, correlation_model_parameters: list = None): + """ + Predict the model response at new points. + + This method evaluates the regression and correlation model at new sample points. Then, it predicts the function + value and standard deviation. + + :param points: Points at which to predict the model response. + :param return_std: Indicator to estimate standard deviation. + :param correlation_model_parameters: Hyperparameters for correlation model. + :return: Predicted values at the new points, Standard deviation of predicted values at the new points + """ + x_ = np.atleast_2d(points) + if self.normalize: + x_ = (x_ - self.sample_mean) / self.sample_std + s_ = (self.samples - self.sample_mean) / self.sample_std + else: + s_ = self.samples + fx, jf = self.regression_model.r(x_) + if correlation_model_parameters is None: + correlation_model_parameters = self.correlation_model_parameters + rx = self.correlation_model.c( + x=x_, s=s_, params=correlation_model_parameters + ) + if correlation_model_parameters is None: + beta, gamma = self.beta, self.gamma + c_inv, f_dash, g_, err_var = self.C_inv, self.F_dash, self.G, self.err_var + else: + beta, gamma, tmp = self._compute_additional_parameters( + self.correlation_model.c(x=s_, s=s_, params=correlation_model_parameters)) + c_inv, f_dash, g_, err_var = tmp[1], tmp[3], tmp[2], tmp[5] + y = np.einsum("ij,jk->ik", fx, beta) + np.einsum( + "ij,jk->ik", rx, gamma + ) + if self.normalize: + y = self.value_mean + y * self.value_std + if x_.shape[1] == 1: + y = y.flatten() + if return_std: + r_dash = np.matmul(c_inv, rx.T) + u = np.matmul(f_dash.T, r_dash) - fx.T + norm1 = np.linalg.norm(r_dash, 2, 0) + norm2 = np.linalg.norm(np.linalg.solve(g_, u), 2, 0) + mse = np.sqrt(err_var * np.atleast_2d(1 + norm2 - norm1).T) + if self.normalize: + mse = self.value_std * mse + if x_.shape[1] == 1: + mse = mse.flatten() + return y, mse + else: + return y + + def jacobian(self, points: np.ndarray): + """ + Predict the gradient of the model at new points. + + This method evaluates the regression and correlation model at new sample point. Then, it predicts the gradient + using the regression coefficients and the training second_order_tensor. + + :param points: Points at which to evaluate the gradient. + :return: Gradient of the surrogate model evaluated at the new points. 
+ """ + x_ = np.atleast_2d(points) + if self.normalize: + x_ = (x_ - self.sample_mean) / self.sample_std + s_ = (self.samples - self.sample_mean) / self.sample_std + else: + s_ = self.samples + + fx, jf = self.regression_model.r(x_) + rx, drdx = self.correlation_model.c( + x=x_, s=s_, params=self.correlation_model_parameters, dx=True + ) + y_grad = np.einsum("ikj,jm->ik", jf, self.beta) + np.einsum( + "ijk,jm->ki", drdx.T, self.gamma + ) + if self.normalize: + y_grad = y_grad * self.value_std / self.sample_std + if x_.shape[1] == 1: + y_grad = y_grad.flatten() + return y_grad + + @staticmethod + def log_likelihood(p0, cm, s, f, y, return_grad): + # Return the log-likelihood function and it's gradient. Gradient is calculate using Central Difference + m = s.shape[0] + n = s.shape[1] + r__, dr_ = cm.c(x=s, s=s, params=p0, dt=True) + try: + cc = cholesky(r__ + 2 ** (-52) * np.eye(m), lower=True) + except np.linalg.LinAlgError: + return np.inf, np.zeros(n) + + # Product of diagonal terms is negligible sometimes, even when cc exists. + if np.prod(np.diagonal(cc)) == 0: + return np.inf, np.zeros(n) + + cc_inv = np.linalg.inv(cc) + r_inv = np.matmul(cc_inv.T, cc_inv) + f__ = cc_inv.dot(f) + y__ = cc_inv.dot(y) + + q__, g__ = np.linalg.qr(f__) # Eq: 3.11, DACE + + # Check if F is a full rank matrix + if np.linalg.matrix_rank(g__) != min(np.size(f__, 0), np.size(f__, 1)): + raise NotImplementedError( + "Chosen regression functions are not sufficiently linearly independent" + ) + + # Design parameters + beta_ = np.linalg.solve(g__, np.matmul(np.transpose(q__), y__)) + + # Computing the process variance (Eq: 3.13, DACE) + sigma_ = np.zeros(y.shape[1]) + + ll = 0 + for out_dim in range(y.shape[1]): + sigma_[out_dim] = (1 / m) * ( + np.linalg.norm(y__[:, out_dim] - np.matmul(f__, beta_[:, out_dim])) ** 2) + # Objective function:= log(det(sigma**2 * R)) + constant + ll = (ll + ( np.log(np.linalg.det(sigma_[out_dim] * r__)) + m * (np.log(2 * np.pi) + 1)) / 2) + + # Gradient of loglikelihood + # Reference: C. E. Rasmussen & C. K. I. Williams, Gaussian Processes for Machine Learning, the MIT Press, + # 2006, ISBN 026218253X. 
(Page 114, Eq.(5.9)) + residual = y - np.matmul(f, beta_) + gamma = np.matmul(r_inv, residual) + grad_mle = np.zeros(n) + for in_dim in range(n): + r_inv_derivative = np.matmul(r_inv, np.matmul(dr_[:, :, in_dim], r_inv)) + tmp = np.matmul(residual.T, np.matmul(r_inv_derivative, residual)) + for out_dim in range(y.shape[1]): + alpha = gamma / sigma_[out_dim] + tmp1 = np.matmul(alpha, alpha.T) - r_inv / sigma_[out_dim] + cov_der = sigma_[out_dim] * dr_[:, :, in_dim] + tmp * r__ / m + grad_mle[in_dim] = grad_mle[in_dim] - 0.5 * np.trace( + np.matmul(tmp1, cov_der) + ) + + if return_grad: + return ll, grad_mle + else: + return ll diff --git a/src/UQpy/surrogates/kriging/__init__.py b/src/UQpy/surrogates/kriging/__init__.py new file mode 100644 index 000000000..55a50199d --- /dev/null +++ b/src/UQpy/surrogates/kriging/__init__.py @@ -0,0 +1,4 @@ +from UQpy.surrogates.kriging.Kriging import Kriging + +from UQpy.surrogates.kriging.regression_models import * +from UQpy.surrogates.kriging.correlation_models import * diff --git a/src/UQpy/surrogates/kriging/correlation_models/CubicCorrelation.py b/src/UQpy/surrogates/kriging/correlation_models/CubicCorrelation.py new file mode 100644 index 000000000..3e909507f --- /dev/null +++ b/src/UQpy/surrogates/kriging/correlation_models/CubicCorrelation.py @@ -0,0 +1,28 @@ +from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import * + + +class CubicCorrelation(Correlation): + def c(self, x, s, params, dt=False, dx=False): + zeta_matrix, dtheta_derivs, dx_derivs = Correlation.derivatives( + x_=x, s_=s, params=params + ) + # Initial matrices containing derivates for all values in array. Note since + # dtheta_s and dx_s already accounted for where derivative should be zero, all + # that must be done is multiplying the |dij| or thetaj matrix on top of a + # matrix of derivates w.r.t zeta (in this case, dzeta = -6zeta+6zeta**2) + drdt = (-6 * zeta_matrix + 6 * zeta_matrix ** 2) * dtheta_derivs + drdx = (-6 * zeta_matrix + 6 * zeta_matrix ** 2) * dx_derivs + # Also, create matrix for values of equation, 1 - 3zeta**2 + 2zeta**3, for loop + zeta_function_cubic = 1 - 3 * zeta_matrix ** 2 + 2 * zeta_matrix ** 3 + rx = np.prod(zeta_function_cubic, 2) + if dt: + # Same as previous example, loop over zeta matrix by shifting index + for i in range(len(params) - 1): + drdt = drdt * np.roll(zeta_function_cubic, i + 1, axis=2) + return rx, drdt + if dx: + # Same as previous example, loop over zeta matrix by shifting index + for i in range(len(params) - 1): + drdx = drdx * np.roll(zeta_function_cubic, i + 1, axis=2) + return rx, drdx + return rx diff --git a/src/UQpy/surrogates/kriging/correlation_models/ExponentialCorrelation.py b/src/UQpy/surrogates/kriging/correlation_models/ExponentialCorrelation.py new file mode 100644 index 000000000..94702760c --- /dev/null +++ b/src/UQpy/surrogates/kriging/correlation_models/ExponentialCorrelation.py @@ -0,0 +1,20 @@ +from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import * + + +class ExponentialCorrelation(Correlation): + def c(self, x, s, params, dt=False, dx=False): + stack = Correlation.check_samples_and_return_stack(x, s) + rx = np.exp(np.sum(-params * abs(stack), axis=2)) + if dt: + drdt = -abs(stack) * np.transpose( + np.tile(rx, (np.size(x, 1), 1, 1)), (1, 2, 0) + ) + return rx, drdt + if dx: + drdx = ( + -params + * np.sign(stack) + * np.transpose(np.tile(rx, (np.size(x, 1), 1, 1)), (1, 2, 0)) + ) + return rx, drdx + return rx diff --git 
a/src/UQpy/surrogates/kriging/correlation_models/GaussianCorrelation.py b/src/UQpy/surrogates/kriging/correlation_models/GaussianCorrelation.py new file mode 100644 index 000000000..05ce09830 --- /dev/null +++ b/src/UQpy/surrogates/kriging/correlation_models/GaussianCorrelation.py @@ -0,0 +1,21 @@ +from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import * + + +class GaussianCorrelation(Correlation): + def c(self, x, s, params, dt=False, dx=False): + stack = Correlation.check_samples_and_return_stack(x, s) + rx = np.exp(np.sum(-params * (stack ** 2), axis=2)) + if dt: + drdt = -(stack ** 2) * np.transpose( + np.tile(rx, (np.size(x, 1), 1, 1)), (1, 2, 0) + ) + return rx, drdt + if dx: + drdx = ( + -2 + * params + * stack + * np.transpose(np.tile(rx, (np.size(x, 1), 1, 1)), (1, 2, 0)) + ) + return rx, drdx + return rx diff --git a/src/UQpy/surrogates/kriging/correlation_models/LinearCorrelation.py b/src/UQpy/surrogates/kriging/correlation_models/LinearCorrelation.py new file mode 100644 index 000000000..69d7f1506 --- /dev/null +++ b/src/UQpy/surrogates/kriging/correlation_models/LinearCorrelation.py @@ -0,0 +1,35 @@ +from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import * + + +class LinearCorrelation(Correlation): + def c(self, x, s, params, dt=False, dx=False): + stack = Correlation.check_samples_and_return_stack(x, s) + # Taking stack and turning each d value into 1-theta*dij + after_parameters = 1 - params * abs(stack) + # Define matrix of zeros to compare against (not necessary to be defined separately, + # but the line is bulky if this isn't defined first, and it is used more than once) + comp_zero = np.zeros((np.size(x, 0), np.size(s, 0), np.size(s, 1))) + # Compute matrix of max{0,1-theta*d} + max_matrix = np.maximum(after_parameters, comp_zero) + rx = np.prod(max_matrix, 2) + # Create matrix that has 1s where max_matrix is nonzero + # -Essentially, this acts as a way to store the indices of where the values are nonzero + ones_and_zeros = max_matrix.astype(bool).astype(int) + # Set initial derivatives as if all were positive + first_dtheta = -abs(stack) + first_dx = np.negative(params) * np.sign(stack) + # Multiply derivs by ones_and_zeros...this will set the values where the + # derivative should be zero to zero, and keep all other values the same + drdt = np.multiply(first_dtheta, ones_and_zeros) + drdx = np.multiply(first_dx, ones_and_zeros) + if dt: + # Loop over parameters, shifting max_matrix and multiplying over derivative matrix with each iter + for i in range(len(params) - 1): + drdt = drdt * np.roll(max_matrix, i + 1, axis=2) + return rx, drdt + if dx: + # Loop over parameters, shifting max_matrix and multiplying over derivative matrix with each iter + for i in range(len(params) - 1): + drdx = drdx * np.roll(max_matrix, i + 1, axis=2) + return rx, drdx + return rx diff --git a/src/UQpy/surrogates/kriging/correlation_models/SphericalCorrelation.py b/src/UQpy/surrogates/kriging/correlation_models/SphericalCorrelation.py new file mode 100644 index 000000000..1f6b8173d --- /dev/null +++ b/src/UQpy/surrogates/kriging/correlation_models/SphericalCorrelation.py @@ -0,0 +1,28 @@ +from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import * + + +class SphericalCorrelation(Correlation): + def c(self, x, s, params, dt=False, dx=False): + zeta_matrix, dtheta_derivs, dx_derivs = Correlation.derivatives( + x_=x, s_=s, params=params + ) + # Initial matrices containing derivates for all values in array. 
Note since + # dtheta_s and dx_s already accounted for where derivative should be zero, all + # that must be done is multiplying the |dij| or thetaj matrix on top of a + # matrix of derivates w.r.t zeta (in this case, dzeta = -1.5+1.5zeta**2) + drdt = (-1.5 + 1.5 * zeta_matrix ** 2) * dtheta_derivs + drdx = (-1.5 + 1.5 * zeta_matrix ** 2) * dx_derivs + # Also, create matrix for values of equation, 1 - 1.5zeta + 0.5zeta**3, for loop + zeta_function = 1 - 1.5 * zeta_matrix + 0.5 * zeta_matrix ** 3 + rx = np.prod(zeta_function, 2) + if dt: + # Same as previous example, loop over zeta matrix by shifting index + for i in range(len(params) - 1): + drdt = drdt * np.roll(zeta_function, i + 1, axis=2) + return rx, drdt + if dx: + # Same as previous example, loop over zeta matrix by shifting index + for i in range(len(params) - 1): + drdx = drdx * np.roll(zeta_function, i + 1, axis=2) + return rx, drdx + return rx diff --git a/src/UQpy/surrogates/kriging/correlation_models/SplineCorrelation.py b/src/UQpy/surrogates/kriging/correlation_models/SplineCorrelation.py new file mode 100644 index 000000000..0aa6282d1 --- /dev/null +++ b/src/UQpy/surrogates/kriging/correlation_models/SplineCorrelation.py @@ -0,0 +1,58 @@ +from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import * + + +class SplineCorrelation(Correlation): + def c(self, x, s, params, dt=False, dx=False): + # x_, s_ = np.atleast_2d(x_), np.atleast_2d(s_) + # # Create stack matrix, where each block is x_i with all s + # stack = np.tile(np.swapaxes(np.atleast_3d(x_), 1, 2), (1, np.size(s_, 0), 1)) - np.tile(s_, ( + # np.size(x_, 0), + # 1, 1)) + stack = Correlation.check_samples_and_return_stack(x, s) + # In this case, the zeta value is just abs(stack)*parameters, no comparison + zeta_matrix = abs(stack) * params + # So, dtheta and dx are just |dj| and theta*sgn(dj), respectively + dtheta_derivs = abs(stack) + # dx_derivs = np.ones((np.size(x,0),np.size(s,0),np.size(s,1)))*parameters + dx_derivs = np.sign(stack) * params + + # Initialize empty sigma and dsigma matrices + sigma = np.ones( + (zeta_matrix.shape[0], zeta_matrix.shape[1], zeta_matrix.shape[2]) + ) + dsigma = np.ones( + (zeta_matrix.shape[0], zeta_matrix.shape[1], zeta_matrix.shape[2]) + ) + + # Loop over cases to create zeta_matrix and subsequent dR matrices + for i in range(zeta_matrix.shape[0]): + for j in range(zeta_matrix.shape[1]): + for k in range(zeta_matrix.shape[2]): + y = zeta_matrix[i, j, k] + if 0 <= y <= 0.2: + sigma[i, j, k] = 1 - 15 * y ** 2 + 30 * y ** 3 + dsigma[i, j, k] = -30 * y + 90 * y ** 2 + elif 0.2 < y < 1.0: + sigma[i, j, k] = 1.25 * (1 - y) ** 3 + dsigma[i, j, k] = 3.75 * (1 - y) ** 2 * -1 + elif y >= 1: + sigma[i, j, k] = 0 + dsigma[i, j, k] = 0 + + rx = np.prod(sigma, 2) + + if dt: + # Initialize derivative matrices incorporating chain rule + drdt = dsigma * dtheta_derivs + # Loop over to create proper matrices + for i in range(len(params) - 1): + drdt = drdt * np.roll(sigma, i + 1, axis=2) + return rx, drdt + if dx: + # Initialize derivative matrices incorporating chain rule + drdx = dsigma * dx_derivs + # Loop over to create proper matrices + for i in range(len(params) - 1): + drdx = drdx * np.roll(sigma, i + 1, axis=2) + return rx, drdx + return rx diff --git a/src/UQpy/surrogates/kriging/correlation_models/__init__.py b/src/UQpy/surrogates/kriging/correlation_models/__init__.py new file mode 100644 index 000000000..10f39dafc --- /dev/null +++ b/src/UQpy/surrogates/kriging/correlation_models/__init__.py @@ -0,0 +1,7 @@ +from 
UQpy.surrogates.kriging.correlation_models.baseclass import * +from UQpy.surrogates.kriging.correlation_models.CubicCorrelation import CubicCorrelation +from UQpy.surrogates.kriging.correlation_models.ExponentialCorrelation import ExponentialCorrelation +from UQpy.surrogates.kriging.correlation_models.GaussianCorrelation import GaussianCorrelation +from UQpy.surrogates.kriging.correlation_models.LinearCorrelation import LinearCorrelation +from UQpy.surrogates.kriging.correlation_models.SphericalCorrelation import SphericalCorrelation +from UQpy.surrogates.kriging.correlation_models.SplineCorrelation import SplineCorrelation diff --git a/src/UQpy/surrogates/kriging/correlation_models/baseclass/Correlation.py b/src/UQpy/surrogates/kriging/correlation_models/baseclass/Correlation.py new file mode 100644 index 000000000..703461b5f --- /dev/null +++ b/src/UQpy/surrogates/kriging/correlation_models/baseclass/Correlation.py @@ -0,0 +1,47 @@ +from abc import ABC, abstractmethod +import numpy as np + + +class Correlation(ABC): + """ + Abstract base class of all Correlations. Serves as a template for creating new Kriging correlation + functions. + """ + + @abstractmethod + def c(self, x, s, params, dt=False, dx=False): + """ + Abstract method that needs to be implemented by the user when creating a new Correlation function. + """ + pass + + @staticmethod + def check_samples_and_return_stack(x, s): + x_, s_ = np.atleast_2d(x), np.atleast_2d(s) + # Create stack matrix, where each block is x_i with all s + stack = np.tile( + np.swapaxes(np.atleast_3d(x_), 1, 2), (1, np.size(s_, 0), 1) + ) - np.tile(s_, (np.size(x_, 0), 1, 1)) + return stack + + @staticmethod + def derivatives(x_, s_, params): + stack = Correlation.check_samples_and_return_stack(x_, s_) + # Taking stack and creating array of all thetaj*dij + after_parameters = params * abs(stack) + # Create matrix of all ones to compare + comp_ones = np.ones((np.size(x_, 0), np.size(s_, 0), np.size(s_, 1))) + # zeta_matrix has all values min{1,theta*dij} + zeta_matrix_ = np.minimum(after_parameters, comp_ones) + # Copy zeta_matrix to another matrix that will used to find where derivative should be zero + indices = zeta_matrix_.copy() + # If value of min{1,theta*dij} is 1, the derivative should be 0. 
+ # So, replace all values of 1 with 0, then perform the .astype(bool).astype(int) + # operation like in the linear example, so you end up with an array of 1's where + # the derivative should be caluclated and 0 where it should be zero + indices[indices == 1] = 0 + # Create matrix of all |dij| (where non zero) to be used in calculation of dR/dtheta + dtheta_derivs_ = indices.astype(bool).astype(int) * abs(stack) + # Same as above, but for matrix of all thetaj where non-zero + dx_derivs_ = indices.astype(bool).astype(int) * params * np.sign(stack) + return zeta_matrix_, dtheta_derivs_, dx_derivs_ diff --git a/src/UQpy/surrogates/kriging/correlation_models/baseclass/__init__.py b/src/UQpy/surrogates/kriging/correlation_models/baseclass/__init__.py new file mode 100644 index 000000000..e8cf1815d --- /dev/null +++ b/src/UQpy/surrogates/kriging/correlation_models/baseclass/__init__.py @@ -0,0 +1 @@ +from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import Correlation diff --git a/src/UQpy/surrogates/kriging/regression_models/ConstantRegression.py b/src/UQpy/surrogates/kriging/regression_models/ConstantRegression.py new file mode 100644 index 000000000..0e4f9e984 --- /dev/null +++ b/src/UQpy/surrogates/kriging/regression_models/ConstantRegression.py @@ -0,0 +1,10 @@ +import numpy as np +from UQpy.surrogates.kriging.regression_models.baseclass.Regression import Regression + + +class ConstantRegression(Regression): + def r(self, s): + s = np.atleast_2d(s) + fx = np.ones([np.size(s, 0), 1]) + jf = np.zeros([np.size(s, 0), np.size(s, 1), 1]) + return fx, jf diff --git a/src/UQpy/surrogates/kriging/regression_models/LinearRegression.py b/src/UQpy/surrogates/kriging/regression_models/LinearRegression.py new file mode 100644 index 000000000..118d8d73c --- /dev/null +++ b/src/UQpy/surrogates/kriging/regression_models/LinearRegression.py @@ -0,0 +1,12 @@ +import numpy as np +from UQpy.surrogates.kriging.regression_models.baseclass.Regression import Regression + + +class LinearRegression(Regression): + def r(self, s): + s = np.atleast_2d(s) + fx = np.concatenate((np.ones([np.size(s, 0), 1]), s), 1) + jf_b = np.zeros([np.size(s, 0), np.size(s, 1), np.size(s, 1)]) + np.einsum("jii->ji", jf_b)[:] = 1 + jf = np.concatenate((np.zeros([np.size(s, 0), np.size(s, 1), 1]), jf_b), 2) + return fx, jf diff --git a/src/UQpy/surrogates/kriging/regression_models/QuadraticRegression.py b/src/UQpy/surrogates/kriging/regression_models/QuadraticRegression.py new file mode 100644 index 000000000..fdddefbb5 --- /dev/null +++ b/src/UQpy/surrogates/kriging/regression_models/QuadraticRegression.py @@ -0,0 +1,37 @@ +import numpy as np +from UQpy.surrogates.kriging.regression_models.baseclass.Regression import Regression + + +class QuadraticRegression(Regression): + def r(self, s): + s = np.atleast_2d(s) + fx = np.zeros( + [np.size(s, 0), int((np.size(s, 1) + 1) * (np.size(s, 1) + 2) / 2)] + ) + jf = np.zeros( + [ + np.size(s, 0), + np.size(s, 1), + int((np.size(s, 1) + 1) * (np.size(s, 1) + 2) / 2), + ] + ) + for i in range(np.size(s, 0)): + temp = np.hstack((1, s[i, :])) + for j in range(np.size(s, 1)): + temp = np.hstack((temp, s[i, j] * s[i, j::])) + fx[i, :] = temp + # definie H matrix + h_ = 0 + for j in range(np.size(s, 1)): + tmp_ = s[i, j] * np.eye(np.size(s, 1)) + t1 = np.zeros([np.size(s, 1), np.size(s, 1)]) + t1[j, :] = s[i, :] + tmp = tmp_ + t1 + if j == 0: + h_ = tmp[:, j::] + else: + h_ = np.hstack((h_, tmp[:, j::])) + jf[i, :, :] = np.hstack( + (np.zeros([np.size(s, 1), 1]), np.eye(np.size(s, 
1)), h_) + ) + return fx, jf diff --git a/src/UQpy/surrogates/kriging/regression_models/__init__.py b/src/UQpy/surrogates/kriging/regression_models/__init__.py new file mode 100644 index 000000000..e6da265b3 --- /dev/null +++ b/src/UQpy/surrogates/kriging/regression_models/__init__.py @@ -0,0 +1,4 @@ +from UQpy.surrogates.kriging.regression_models.baseclass import * +from UQpy.surrogates.kriging.regression_models.ConstantRegression import ConstantRegression +from UQpy.surrogates.kriging.regression_models.LinearRegression import LinearRegression +from UQpy.surrogates.kriging.regression_models.QuadraticRegression import QuadraticRegression diff --git a/src/UQpy/surrogates/kriging/regression_models/baseclass/Regression.py b/src/UQpy/surrogates/kriging/regression_models/baseclass/Regression.py new file mode 100644 index 000000000..3d435e51d --- /dev/null +++ b/src/UQpy/surrogates/kriging/regression_models/baseclass/Regression.py @@ -0,0 +1,14 @@ +from abc import ABC, abstractmethod + + +class Regression(ABC): + """ + Abstract base class of all Regressions. Serves as a template for creating new Kriging regression + functions. + """ + @abstractmethod + def r(self, s): + """ + Abstract method that needs to be implemented by the user when creating a new Regression function. + """ + pass diff --git a/src/UQpy/surrogates/kriging/regression_models/baseclass/__init__.py b/src/UQpy/surrogates/kriging/regression_models/baseclass/__init__.py new file mode 100644 index 000000000..004e95e23 --- /dev/null +++ b/src/UQpy/surrogates/kriging/regression_models/baseclass/__init__.py @@ -0,0 +1 @@ +from UQpy.surrogates.kriging.regression_models.baseclass.Regression import Regression From 5a9c338aea3695e0488288981b9fe069c1dec1f3 Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Thu, 10 Nov 2022 10:00:06 -0500 Subject: [PATCH 75/88] Fixes Nataf Stability issue by adding an n_gauss_points optional parameter to distortion_z2x function --- .../baseclass/LearningFunction.py | 2 +- src/UQpy/transformations/Nataf.py | 63 ++++++++++--------- 2 files changed, 34 insertions(+), 31 deletions(-) diff --git a/src/UQpy/sampling/adaptive_kriging_functions/baseclass/LearningFunction.py b/src/UQpy/sampling/adaptive_kriging_functions/baseclass/LearningFunction.py index 1da00c410..e754dfb01 100644 --- a/src/UQpy/sampling/adaptive_kriging_functions/baseclass/LearningFunction.py +++ b/src/UQpy/sampling/adaptive_kriging_functions/baseclass/LearningFunction.py @@ -4,7 +4,7 @@ class LearningFunction(ABC): def __init(self, ordered_parameters=None, **kwargs): self.parameters = kwargs - self.ordered_parameters = (ordered_parameters if not None else tuple(kwargs.keys())) + self.ordered_parameters = (ordered_parameters if ordered_parameters is not None else tuple(kwargs.keys())) if len(self.ordered_parameters) != len(self.parameters): raise ValueError("Inconsistent dimensions between order_params tuple and params dictionary.") diff --git a/src/UQpy/transformations/Nataf.py b/src/UQpy/transformations/Nataf.py index 54c4075ca..bf984c292 100644 --- a/src/UQpy/transformations/Nataf.py +++ b/src/UQpy/transformations/Nataf.py @@ -21,17 +21,17 @@ class Nataf: @beartype def __init__( - self, - distributions: Union[Distribution, DistributionList], - samples_x: Union[None, np.ndarray] = None, - samples_z: Union[None, np.ndarray] = None, - jacobian: bool = False, - corr_z: Union[None, np.ndarray] = None, - corr_x: Union[None, np.ndarray] = None, - itam_beta: Union[float, int] = 1.0, - itam_threshold1: Union[float, int] = 0.001, - itam_threshold2: 
Union[float, int] = 0.1, - itam_max_iter: int = 100, + self, + distributions: Union[Distribution, DistributionList], + samples_x: Union[None, np.ndarray] = None, + samples_z: Union[None, np.ndarray] = None, + jacobian: bool = False, + corr_z: Union[None, np.ndarray] = None, + corr_x: Union[None, np.ndarray] = None, + itam_beta: Union[float, int] = 1.0, + itam_threshold1: Union[float, int] = 0.001, + itam_threshold2: Union[float, int] = 0.1, + itam_max_iter: int = 100, ): """ Transform random variables using the Nataf or Inverse Nataf transformation @@ -73,7 +73,7 @@ def __init__( self.dist_object = distributions self.samples_x: NumpyFloatArray = samples_x """Random vector of shape ``(nsamples, n_dimensions)`` with prescribed probability distributions.""" - self.samples_z:NumpyFloatArray = samples_z + self.samples_z: NumpyFloatArray = samples_z """Standard normal random vector of shape ``(nsamples, n_dimensions)``""" self.jacobian = jacobian self.jzx: NumpyFloatArray = None @@ -98,9 +98,9 @@ def __init__( elif all(isinstance(x, Normal) for x in distributions): self.corr_z = self.corr_x else: - self.corr_z, self.itam_error1, self.itam_error2 =\ + self.corr_z, self.itam_error1, self.itam_error2 = \ self.itam(self.dist_object, self.corr_x, self.itam_max_iter, self.itam_beta, - self.itam_threshold1, self.itam_threshold2,) + self.itam_threshold1, self.itam_threshold2, ) elif corr_z is not None: self.corr_z = corr_z if np.all(np.equal(self.corr_z, np.eye(self.n_dimensions))): @@ -119,10 +119,10 @@ def __init__( @beartype def run( - self, - samples_x: Union[None, np.ndarray] = None, - samples_z: Union[None, np.ndarray] = None, - jacobian: bool = False, + self, + samples_x: Union[None, np.ndarray] = None, + samples_z: Union[None, np.ndarray] = None, + jacobian: bool = False, ): """ Execute the Nataf transformation or its inverse. @@ -160,15 +160,15 @@ def run( @staticmethod def itam( - distributions: Union[ - DistributionContinuous1D, - JointIndependent, - list[Union[DistributionContinuous1D, JointIndependent]]], - corr_x, - itam_max_iter: int = 100, - itam_beta: Union[float, int] = 1.0, - itam_threshold1: Union[float, int] = 0.001, - itam_threshold2: Union[float, int] = 0.01, + distributions: Union[ + DistributionContinuous1D, + JointIndependent, + list[Union[DistributionContinuous1D, JointIndependent]]], + corr_x, + itam_max_iter: int = 100, + itam_beta: Union[float, int] = 1.0, + itam_threshold1: Union[float, int] = 0.001, + itam_threshold2: Union[float, int] = 0.01, ): """ Calculate the correlation matrix :math:`\mathbf{C_Z}` of the standard normal random vector @@ -236,7 +236,8 @@ def itam( return corr_z, itam_error1, itam_error2 @staticmethod - def distortion_z2x(distributions: Union[Distribution, list[Distribution]], corr_z: np.ndarray): + def distortion_z2x(distributions: Union[Distribution, list[Distribution]], corr_z: np.ndarray, + n_gauss_points: int = 1024): """ This is a method to calculate the correlation matrix :math:`\mathbf{C_x}` of the random vector :math:`\mathbf{x}` given the correlation matrix :math:`\mathbf{C_z}` of the standard normal random vector @@ -248,12 +249,14 @@ def distortion_z2x(distributions: Union[Distribution, list[Distribution]], corr This method is part of the :class:`.Nataf` class. :param corr_z: The correlation matrix (:math:`\mathbf{C_z}`) of the standard normal vector **Z** . Default: The ``identity`` matrix. 
+ :param n_gauss_points: The number of integration points used for the numerical integration of the + correlation matrix (:math:`\mathbf{C_Z}`) of the standard normal random vector **Z** :return: Distorted correlation matrix (:math:`\mathbf{C_X}`) of the random vector **X**. """ logger = logging.getLogger(__name__) z_max = 8 z_min = -z_max - ng = 128 + ng = n_gauss_points eta, w2d, xi = calculate_gauss_quadrature_2d(ng, z_max, z_min) @@ -268,7 +271,7 @@ def distortion_z2x(distributions: Union[Distribution, list[Distribution]], corr @staticmethod def calculate_corr_x(corr_x, corr_z, marginals, eta, w2d, xi, is_joint): if all(hasattr(m, "moments") for m in marginals) and all( - hasattr(m, "icdf") for m in marginals + hasattr(m, "icdf") for m in marginals ): for i in range(len(marginals)): i_cdf_i = marginals[i].icdf From 10a38229207b0d579b7453e0ed5a6ab469e356c3 Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Thu, 10 Nov 2022 12:05:46 -0500 Subject: [PATCH 76/88] Fixes matplotlib 3d projection issue --- docs/code/surrogates/gpr/plot_gpr_custom2D.py | 2 +- docs/code/surrogates/pce/plot_pce_camel.py | 2 +- docs/code/surrogates/pce/plot_pce_sphere.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/code/surrogates/gpr/plot_gpr_custom2D.py b/docs/code/surrogates/gpr/plot_gpr_custom2D.py index 97820ea8c..764130894 100644 --- a/docs/code/surrogates/gpr/plot_gpr_custom2D.py +++ b/docs/code/surrogates/gpr/plot_gpr_custom2D.py @@ -112,7 +112,7 @@ y_act = np.array(r2model.qoi_list).reshape(x1g.shape[0], x1g.shape[1]) fig1 = plt.figure() -ax = fig1.gca(projection='3d') +ax = fig1.add_subplot(projection='3d') surf = ax.plot_surface(x1g, x2g, y_act, cmap=cm.coolwarm, linewidth=0, antialiased=False) ax.set_zlim(-1, 15) ax.zaxis.set_major_locator(LinearLocator(10)) diff --git a/docs/code/surrogates/pce/plot_pce_camel.py b/docs/code/surrogates/pce/plot_pce_camel.py index 27c3e7ed6..8a6adffd2 100644 --- a/docs/code/surrogates/pce/plot_pce_camel.py +++ b/docs/code/surrogates/pce/plot_pce_camel.py @@ -76,7 +76,7 @@ def function(x, y): f = function(X1_, X2_) fig = plt.figure(figsize=(10, 6)) -ax = fig.gca(projection='3d') +ax = fig.add_subplot(projection='3d') surf = ax.plot_surface(X1_, X2_, f, rstride=1, cstride=1, cmap='gnuplot2', linewidth=0, antialiased=False) ax.set_title('True function') ax.set_xlabel('$x_1$', fontsize=15) diff --git a/docs/code/surrogates/pce/plot_pce_sphere.py b/docs/code/surrogates/pce/plot_pce_sphere.py index 57382c7e9..a81e32663 100644 --- a/docs/code/surrogates/pce/plot_pce_sphere.py +++ b/docs/code/surrogates/pce/plot_pce_sphere.py @@ -71,7 +71,7 @@ def function(x,y): f = function(X1_, X2_) fig = plt.figure(figsize=(10,6)) -ax = fig.gca(projection='3d') +ax = fig.add_subplot(projection='3d') surf = ax.plot_surface(X1_, X2_, f, rstride=1, cstride=1, cmap='gnuplot2', linewidth=0, antialiased=False) ax.set_title('True function') ax.set_xlabel('$x_1$', fontsize=15) From e1cee93b4b965766baf84adae2d22f2dbf8f0382 Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Thu, 10 Nov 2022 12:27:41 -0500 Subject: [PATCH 77/88] Fixes matplotlib 3d projection issue --- docs/code/surrogates/pce/plot_pce_camel.py | 4 ++-- docs/code/surrogates/pce/plot_pce_sphere.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/code/surrogates/pce/plot_pce_camel.py b/docs/code/surrogates/pce/plot_pce_camel.py index 8a6adffd2..7e0a0cafb 100644 --- a/docs/code/surrogates/pce/plot_pce_camel.py +++ b/docs/code/surrogates/pce/plot_pce_camel.py @@ -95,7 +95,7 
@@ def function(x, y): # %% fig = plt.figure(figsize=(10, 6)) -ax = fig.gca(projection='3d') +ax = fig.add_subplot(projection='3d') ax.scatter(x[:, 0], x[:, 1], y, s=20, c='r') ax.set_title('Training data') @@ -168,7 +168,7 @@ def function(x, y): # %% fig = plt.figure(figsize=(10,6)) -ax = fig.gca(projection='3d') +ax = fig.add_subplot(projection='3d') ax.scatter(x_test[:,0], x_test[:,1], y_test, s=1) ax.set_title('PCE predictor') diff --git a/docs/code/surrogates/pce/plot_pce_sphere.py b/docs/code/surrogates/pce/plot_pce_sphere.py index a81e32663..1c6e42dde 100644 --- a/docs/code/surrogates/pce/plot_pce_sphere.py +++ b/docs/code/surrogates/pce/plot_pce_sphere.py @@ -90,7 +90,7 @@ def function(x,y): # %% fig = plt.figure(figsize=(10,6)) -ax = fig.gca(projection='3d') +ax = fig.add_subplot(projection='3d') ax.scatter(x[:,0], x[:,1], y, s=20, c='r') ax.set_title('Training data') @@ -156,7 +156,7 @@ def function(x,y): # %% fig = plt.figure(figsize=(10,6)) -ax = fig.gca(projection='3d') +ax = fig.add_subplot(projection='3d') ax.scatter(x_test[:,0], x_test[:,1], y_test, s=1) ax.set_title('PCE predictor') From 293f374e5516a96ead5309a0c93c4dec05e5ce92 Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Thu, 10 Nov 2022 12:56:01 -0500 Subject: [PATCH 78/88] Test fix --- src/UQpy/transformations/Nataf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/UQpy/transformations/Nataf.py b/src/UQpy/transformations/Nataf.py index bf984c292..90c3adf2c 100644 --- a/src/UQpy/transformations/Nataf.py +++ b/src/UQpy/transformations/Nataf.py @@ -108,7 +108,7 @@ def __init__( elif all(isinstance(x, Normal) for x in distributions): self.corr_x = self.corr_z else: - self.corr_x = self.distortion_z2x(self.dist_object, self.corr_z) + self.corr_x = self.distortion_z2x(self.dist_object, self.corr_z, n_gauss_points=128) self.H: NumpyFloatArray = cholesky(self.corr_z, lower=True) """The lower triangular matrix resulting from the Cholesky decomposition of the correlation matrix From 80c3b0333143025d4a5adcbdd6e080d79d2b3e07 Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Mon, 21 Nov 2022 11:54:22 -0500 Subject: [PATCH 79/88] Removes old kriging files --- src/UQpy/reliability/taylor_series/FORM.py | 2 +- src/UQpy/surrogates/__init__.py | 1 - src/UQpy/surrogates/kriging/Kriging.py | 354 ------------------ src/UQpy/surrogates/kriging/__init__.py | 4 - .../correlation_models/CubicCorrelation.py | 28 -- .../ExponentialCorrelation.py | 20 - .../correlation_models/GaussianCorrelation.py | 21 -- .../correlation_models/LinearCorrelation.py | 35 -- .../SphericalCorrelation.py | 28 -- .../correlation_models/SplineCorrelation.py | 58 --- .../kriging/correlation_models/__init__.py | 7 - .../baseclass/Correlation.py | 47 --- .../correlation_models/baseclass/__init__.py | 1 - .../regression_models/ConstantRegression.py | 10 - .../regression_models/LinearRegression.py | 12 - .../regression_models/QuadraticRegression.py | 37 -- .../kriging/regression_models/__init__.py | 4 - .../regression_models/baseclass/Regression.py | 14 - .../regression_models/baseclass/__init__.py | 1 - .../sampling/test_adaptive_kriging.py | 1 - .../sampling/test_refined_stratified.py | 1 - tests/unit_tests/surrogates/test_kriging.py | 198 ---------- 22 files changed, 1 insertion(+), 883 deletions(-) delete mode 100755 src/UQpy/surrogates/kriging/Kriging.py delete mode 100644 src/UQpy/surrogates/kriging/__init__.py delete mode 100644 src/UQpy/surrogates/kriging/correlation_models/CubicCorrelation.py delete mode 100644 
src/UQpy/surrogates/kriging/correlation_models/ExponentialCorrelation.py
delete mode 100644 src/UQpy/surrogates/kriging/correlation_models/GaussianCorrelation.py
delete mode 100644 src/UQpy/surrogates/kriging/correlation_models/LinearCorrelation.py
delete mode 100644 src/UQpy/surrogates/kriging/correlation_models/SphericalCorrelation.py
delete mode 100644 src/UQpy/surrogates/kriging/correlation_models/SplineCorrelation.py
delete mode 100644 src/UQpy/surrogates/kriging/correlation_models/__init__.py
delete mode 100644 src/UQpy/surrogates/kriging/correlation_models/baseclass/Correlation.py
delete mode 100644 src/UQpy/surrogates/kriging/correlation_models/baseclass/__init__.py
delete mode 100644 src/UQpy/surrogates/kriging/regression_models/ConstantRegression.py
delete mode 100644 src/UQpy/surrogates/kriging/regression_models/LinearRegression.py
delete mode 100644 src/UQpy/surrogates/kriging/regression_models/QuadraticRegression.py
delete mode 100644 src/UQpy/surrogates/kriging/regression_models/__init__.py
delete mode 100644 src/UQpy/surrogates/kriging/regression_models/baseclass/Regression.py
delete mode 100644 src/UQpy/surrogates/kriging/regression_models/baseclass/__init__.py
delete mode 100644 tests/unit_tests/surrogates/test_kriging.py
diff --git a/src/UQpy/reliability/taylor_series/FORM.py b/src/UQpy/reliability/taylor_series/FORM.py
index a3cd250fe..dee48a4fc 100644
--- a/src/UQpy/reliability/taylor_series/FORM.py
+++ b/src/UQpy/reliability/taylor_series/FORM.py
@@ -312,7 +312,7 @@ def run(self, seed_x: Union[list, np.ndarray] = None,
             else:
                 k = k + 1
-            self.logger.error("Error: %s", error_record[-1])
+            self.logger.info("Error: %s", error_record[-1])
             if converged is True or k > self.n_iterations:
                 break
diff --git a/src/UQpy/surrogates/__init__.py b/src/UQpy/surrogates/__init__.py
index 168ebdbfa..fe76e50e2 100644
--- a/src/UQpy/surrogates/__init__.py
+++ b/src/UQpy/surrogates/__init__.py
@@ -1,6 +1,5 @@
 from UQpy.surrogates.polynomial_chaos import *
 from UQpy.surrogates.stochastic_reduced_order_models import *
-from UQpy.surrogates.kriging import *
 from UQpy.surrogates.gaussian_process import *
 from UQpy.surrogates.baseclass import *
diff --git a/src/UQpy/surrogates/kriging/Kriging.py b/src/UQpy/surrogates/kriging/Kriging.py
deleted file mode 100755
index 19cbfaf1d..000000000
--- a/src/UQpy/surrogates/kriging/Kriging.py
+++ /dev/null
@@ -1,354 +0,0 @@
-import logging
-from typing import Callable
-
-import numpy as np
-from scipy.linalg import cholesky
-import scipy.stats as stats
-from beartype import beartype
-
-from UQpy.utilities import MinimizeOptimizer
-from UQpy.utilities.Utilities import process_random_state
-from UQpy.surrogates.baseclass.Surrogate import Surrogate
-from UQpy.utilities.ValidationTypes import RandomStateType
-from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import Correlation
-from UQpy.surrogates.kriging.regression_models.baseclass.Regression import Regression
-
-
-class Kriging(Surrogate):
-    @beartype
-    def __init__(
-        self,
-        regression_model: Regression,
-        correlation_model: Correlation,
-        correlation_model_parameters: list,
-        optimizer,
-        bounds: list = None,
-        optimize: bool = True,
-        optimizations_number: int = 1,
-        normalize: bool = True,
-        random_state: RandomStateType = None,
-    ):
-        """
-        Kriging generates a Gaussian process regression-based surrogate model to predict the model output at new sample
-        points.
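The class removed here is superseded by GaussianProcessRegression, which the reworked AK-MCS tests at the end of this series construct directly. A minimal sketch of the replacement construction, mirroring those tests, with illustrative training data and assuming only that fit() keeps the samples/values signature of the class deleted here:

    import numpy as np
    from UQpy import GaussianProcessRegression, RBF, LinearRegression
    from UQpy.utilities.MinimizeOptimizer import MinimizeOptimizer

    # Bounds and hyperparameters follow the updated tests later in this series
    bounds = [[10 ** (-4), 10 ** 3], [10 ** (-3), 10 ** 2], [10 ** (-3), 10 ** 2]]
    optimizer = MinimizeOptimizer(method='L-BFGS-B', bounds=bounds)
    gpr = GaussianProcessRegression(kernel=RBF(), hyperparameters=[1, 10 ** (-3), 10 ** (-2)],
                                    optimizer=optimizer, optimizations_number=10, noise=False,
                                    regression_model=LinearRegression(), random_state=1)
    samples = np.linspace(0, 5, 20).reshape(-1, 1)
    gpr.fit(samples=samples, values=np.cos(samples))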
- - :param regression_model: `regression_model` specifies and evaluates the basis functions and their coefficients, - which defines the trend of the model. Built-in options: :class:`Constant`, :class:`Linear`, :class:`Quadratic` - :param correlation_model: `correlation_model` specifies and evaluates the correlation function. - Built-in options: :class:`Exponential`, :class:`Gaussian`, :class:`Linear`, :class:`Spherical`, - :class:`Cubic`, :class:`Spline` - :param correlation_model_parameters: List or array of initial values for the correlation model - hyperparameters/scale parameters. - :param bounds: Bounds on the hyperparameters used to solve optimization problem to estimate maximum likelihood - estimator. This should be a closed bound. - Default: :math:`[0.001, 10^7]` for each hyperparameter. - :param optimize: Indicator to solve MLE problem or not. If :any:'True' corr_model_params will be used as initial - solution for optimization problem. Otherwise, correlation_model_parameters will be directly use as the - hyperparamters. - Default: :any:`True`. - :param optimizations_number: Number of times MLE optimization problem is to be solved with a random starting - point. Default: :math:`1`. - :param normalize: Boolean flag used in case data normalization is required. - :param optimizer: Object of the :class:`Optimizer` optimizer used during the Kriging surrogate. - Default: :class:`.MinimizeOptimizer`. - :param random_state: Random seed used to initialize the pseudo-random number generator. If an :any:`int` is - provided, this sets the seed for an object of :class:`numpy.random.RandomState`. Otherwise, the - object itself can be passed directly. - """ - self.regression_model = regression_model - self.correlation_model = correlation_model - self.correlation_model_parameters = np.array(correlation_model_parameters) - self.bounds = bounds - self.optimizer = optimizer - self.optimizations_number = optimizations_number - self.optimize = optimize - self.normalize = normalize - self.logger = logging.getLogger(__name__) - self.random_state = random_state - - # Variables are used outside the __init__ - self.samples = None - self.values = None - self.sample_mean, self.sample_std = None, None - self.value_mean, self.value_std = None, None - self.rmodel, self.cmodel = None, None - self.beta: list = None - """Regression coefficients.""" - self.gamma = None - self.err_var: float = None - """Variance of the Gaussian random process.""" - self.F_dash = None - self.C_inv = None - self.G = None - self.F, self.R = None, None - - if isinstance(self.optimizer, str): - raise ValueError("The optimization function provided a input parameter cannot be None.") - - if optimizer._bounds is None: - optimizer.update_bounds([[0.001, 10 ** 7]] * self.correlation_model_parameters.shape[0]) - - self.jac = optimizer.supports_jacobian() - self.random_state = process_random_state(random_state) - - def fit( - self, - samples, - values, - optimizations_number: int = None, - correlation_model_parameters: list = None, - ): - """ - Fit the surrogate model using the training samples and the corresponding model values. - - The user can run this method multiple time after initiating the :class:`.Kriging` class object. - - This method updates the samples and parameters of the :class:`.Kriging` object. This method uses - `correlation_model_parameters` from previous run as the starting point for MLE problem unless user provides a - new starting point. - - :param samples: :class:`numpy.ndarray` containing the training points. 
- :param values: :class:`numpy.ndarray` containing the model evaluations at the training points. - :param optimizations_number: number of optimization iterations - :param correlation_model_parameters: List or array of initial values for the correlation model - hyperparameters/scale parameters. - - The :meth:`fit` method has no returns, although it creates the :py:attr:`beta`, :py:attr:`err_var` and - :py:attr:`C_inv` attributes of the :class:`.Kriging` class. - """ - self.logger.info("UQpy: Running kriging.fit") - - if optimizations_number is not None: - self.optimizations_number = optimizations_number - if correlation_model_parameters is not None: - self.correlation_model_parameters = np.array(correlation_model_parameters) - self.samples = np.array(samples) - - # Number of samples and dimensions of samples and values - nsamples, input_dim = self.samples.shape - output_dim = int(np.size(values) / nsamples) - - self.values = np.array(values).reshape(nsamples, output_dim) - - # Normalizing the data - if self.normalize: - self.sample_mean, self.sample_std = np.mean(self.samples, 0), np.std(self.samples, 0) - self.value_mean, self.value_std = np.mean(self.values, 0), np.std(self.values, 0) - s_ = (self.samples - self.sample_mean) / self.sample_std - y_ = (self.values - self.value_mean) / self.value_std - else: - s_ = self.samples - y_ = self.values - - self.F, jf_ = self.regression_model.r(s_) - - # Maximum Likelihood Estimation : Solving optimization problem to calculate hyperparameters - if self.optimize: - starting_point = self.correlation_model_parameters - - minimizer, fun_value = np.zeros([self.optimizations_number, input_dim]),\ - np.zeros([self.optimizations_number, 1]) - for i__ in range(self.optimizations_number): - p_ = self.optimizer.optimize(function=Kriging.log_likelihood, - initial_guess=starting_point, - args=(self.correlation_model, s_, self.F, y_, self.jac), - jac=self.jac) - print(p_.success) - # print(self.kwargs_optimizer) - minimizer[i__, :] = p_.x - fun_value[i__, 0] = p_.fun - # Generating new starting points using log-uniform distribution - if i__ != self.optimizations_number - 1: - starting_point = stats.reciprocal.rvs([j[0] for j in self.optimizer._bounds], - [j[1] for j in self.optimizer._bounds], 1, - random_state=self.random_state) - print(starting_point) - - if min(fun_value) == np.inf: - raise NotImplementedError("Maximum likelihood estimator failed: Choose different starting point or " - "increase nopt") - t = np.argmin(fun_value) - self.correlation_model_parameters = minimizer[t, :] - - # Updated Correlation matrix corresponding to MLE estimates of hyperparameters - self.R = self.correlation_model.c(x=s_, s=s_, params=self.correlation_model_parameters) - - self.beta, self.gamma, tmp = self._compute_additional_parameters(self.R) - self.C_inv, self.F_dash, self.G, self.err_var = tmp[1], tmp[3], tmp[2], tmp[5] - - self.logger.info("UQpy: kriging fit complete.") - - def _compute_additional_parameters(self, correlation_matrix): - if self.normalize: - y_ = (self.values - self.value_mean) / self.value_std - else: - y_ = self.values - # Compute the regression coefficient (solving this linear equation: F * beta = Y) - # Eq: 3.8, DACE - c = cholesky(correlation_matrix + (10 + self.samples.shape[0]) * 2 ** (-52) * np.eye(self.samples.shape[0]), - lower=True, check_finite=False) - c_inv = np.linalg.inv(c) - f_dash = np.linalg.solve(c, self.F) - y_dash = np.linalg.solve(c, y_) - q_, g_ = np.linalg.qr(f_dash) # Eq: 3.11, DACE - # Check if F is a full rank matrix - if 
np.linalg.matrix_rank(g_) != min(np.size(self.F, 0), np.size(self.F, 1)): - raise NotImplementedError("Chosen regression functions are not sufficiently linearly independent") - # Design parameters (beta: regression coefficient) - beta = np.linalg.solve(g_, np.matmul(np.transpose(q_), y_dash)) - - # Design parameter (R * gamma = Y - F * beta = residual) - gamma = np.linalg.solve(c.T, (y_dash - np.matmul(f_dash, beta))) - - # Computing the process variance (Eq: 3.13, DACE) - err_var = np.zeros(self.values.shape[1]) - for i in range(self.values.shape[1]): - err_var[i] = (1 / self.samples.shape[0]) * (np.linalg.norm(y_dash[:, i] - - np.matmul(f_dash, beta[:, i])) ** 2) - - return beta, gamma, (c, c_inv, g_, f_dash, y_dash, err_var) - - def predict(self, points: np.ndarray, return_std: bool = False, correlation_model_parameters: list = None): - """ - Predict the model response at new points. - - This method evaluates the regression and correlation model at new sample points. Then, it predicts the function - value and standard deviation. - - :param points: Points at which to predict the model response. - :param return_std: Indicator to estimate standard deviation. - :param correlation_model_parameters: Hyperparameters for correlation model. - :return: Predicted values at the new points, Standard deviation of predicted values at the new points - """ - x_ = np.atleast_2d(points) - if self.normalize: - x_ = (x_ - self.sample_mean) / self.sample_std - s_ = (self.samples - self.sample_mean) / self.sample_std - else: - s_ = self.samples - fx, jf = self.regression_model.r(x_) - if correlation_model_parameters is None: - correlation_model_parameters = self.correlation_model_parameters - rx = self.correlation_model.c( - x=x_, s=s_, params=correlation_model_parameters - ) - if correlation_model_parameters is None: - beta, gamma = self.beta, self.gamma - c_inv, f_dash, g_, err_var = self.C_inv, self.F_dash, self.G, self.err_var - else: - beta, gamma, tmp = self._compute_additional_parameters( - self.correlation_model.c(x=s_, s=s_, params=correlation_model_parameters)) - c_inv, f_dash, g_, err_var = tmp[1], tmp[3], tmp[2], tmp[5] - y = np.einsum("ij,jk->ik", fx, beta) + np.einsum( - "ij,jk->ik", rx, gamma - ) - if self.normalize: - y = self.value_mean + y * self.value_std - if x_.shape[1] == 1: - y = y.flatten() - if return_std: - r_dash = np.matmul(c_inv, rx.T) - u = np.matmul(f_dash.T, r_dash) - fx.T - norm1 = np.linalg.norm(r_dash, 2, 0) - norm2 = np.linalg.norm(np.linalg.solve(g_, u), 2, 0) - mse = np.sqrt(err_var * np.atleast_2d(1 + norm2 - norm1).T) - if self.normalize: - mse = self.value_std * mse - if x_.shape[1] == 1: - mse = mse.flatten() - return y, mse - else: - return y - - def jacobian(self, points: np.ndarray): - """ - Predict the gradient of the model at new points. - - This method evaluates the regression and correlation model at new sample point. Then, it predicts the gradient - using the regression coefficients and the training second_order_tensor. - - :param points: Points at which to evaluate the gradient. - :return: Gradient of the surrogate model evaluated at the new points. 
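The gradient computed in the body below is the exact derivative of the kriging predictor y_hat(x) = f(x)' * beta + r(x)' * gamma: the basis-function Jacobian weighted by the regression coefficients plus the correlation derivatives weighted by gamma. A minimal numeric sketch of that composition (the shapes are illustrative assumptions, not the class's internal layout):

    import numpy as np

    d, p, n = 2, 3, 5                  # input dimension, basis terms, training points
    beta = np.random.rand(p, 1)        # regression coefficients
    gamma = np.random.rand(n, 1)       # correlation weights
    jf = np.random.rand(d, p)          # Jacobian of the basis functions at x
    drdx = np.random.rand(d, n)        # derivatives of r(x, s_i) with respect to x
    grad = jf @ beta + drdx @ gamma    # gradient of y_hat(x), shape (d, 1)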
- """ - x_ = np.atleast_2d(points) - if self.normalize: - x_ = (x_ - self.sample_mean) / self.sample_std - s_ = (self.samples - self.sample_mean) / self.sample_std - else: - s_ = self.samples - - fx, jf = self.regression_model.r(x_) - rx, drdx = self.correlation_model.c( - x=x_, s=s_, params=self.correlation_model_parameters, dx=True - ) - y_grad = np.einsum("ikj,jm->ik", jf, self.beta) + np.einsum( - "ijk,jm->ki", drdx.T, self.gamma - ) - if self.normalize: - y_grad = y_grad * self.value_std / self.sample_std - if x_.shape[1] == 1: - y_grad = y_grad.flatten() - return y_grad - - @staticmethod - def log_likelihood(p0, cm, s, f, y, return_grad): - # Return the log-likelihood function and it's gradient. Gradient is calculate using Central Difference - m = s.shape[0] - n = s.shape[1] - r__, dr_ = cm.c(x=s, s=s, params=p0, dt=True) - try: - cc = cholesky(r__ + 2 ** (-52) * np.eye(m), lower=True) - except np.linalg.LinAlgError: - return np.inf, np.zeros(n) - - # Product of diagonal terms is negligible sometimes, even when cc exists. - if np.prod(np.diagonal(cc)) == 0: - return np.inf, np.zeros(n) - - cc_inv = np.linalg.inv(cc) - r_inv = np.matmul(cc_inv.T, cc_inv) - f__ = cc_inv.dot(f) - y__ = cc_inv.dot(y) - - q__, g__ = np.linalg.qr(f__) # Eq: 3.11, DACE - - # Check if F is a full rank matrix - if np.linalg.matrix_rank(g__) != min(np.size(f__, 0), np.size(f__, 1)): - raise NotImplementedError( - "Chosen regression functions are not sufficiently linearly independent" - ) - - # Design parameters - beta_ = np.linalg.solve(g__, np.matmul(np.transpose(q__), y__)) - - # Computing the process variance (Eq: 3.13, DACE) - sigma_ = np.zeros(y.shape[1]) - - ll = 0 - for out_dim in range(y.shape[1]): - sigma_[out_dim] = (1 / m) * ( - np.linalg.norm(y__[:, out_dim] - np.matmul(f__, beta_[:, out_dim])) ** 2) - # Objective function:= log(det(sigma**2 * R)) + constant - ll = (ll + ( np.log(np.linalg.det(sigma_[out_dim] * r__)) + m * (np.log(2 * np.pi) + 1)) / 2) - - # Gradient of loglikelihood - # Reference: C. E. Rasmussen & C. K. I. Williams, Gaussian Processes for Machine Learning, the MIT Press, - # 2006, ISBN 026218253X. 
(Page 114, Eq.(5.9)) - residual = y - np.matmul(f, beta_) - gamma = np.matmul(r_inv, residual) - grad_mle = np.zeros(n) - for in_dim in range(n): - r_inv_derivative = np.matmul(r_inv, np.matmul(dr_[:, :, in_dim], r_inv)) - tmp = np.matmul(residual.T, np.matmul(r_inv_derivative, residual)) - for out_dim in range(y.shape[1]): - alpha = gamma / sigma_[out_dim] - tmp1 = np.matmul(alpha, alpha.T) - r_inv / sigma_[out_dim] - cov_der = sigma_[out_dim] * dr_[:, :, in_dim] + tmp * r__ / m - grad_mle[in_dim] = grad_mle[in_dim] - 0.5 * np.trace( - np.matmul(tmp1, cov_der) - ) - - if return_grad: - return ll, grad_mle - else: - return ll diff --git a/src/UQpy/surrogates/kriging/__init__.py b/src/UQpy/surrogates/kriging/__init__.py deleted file mode 100644 index 55a50199d..000000000 --- a/src/UQpy/surrogates/kriging/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from UQpy.surrogates.kriging.Kriging import Kriging - -from UQpy.surrogates.kriging.regression_models import * -from UQpy.surrogates.kriging.correlation_models import * diff --git a/src/UQpy/surrogates/kriging/correlation_models/CubicCorrelation.py b/src/UQpy/surrogates/kriging/correlation_models/CubicCorrelation.py deleted file mode 100644 index 3e909507f..000000000 --- a/src/UQpy/surrogates/kriging/correlation_models/CubicCorrelation.py +++ /dev/null @@ -1,28 +0,0 @@ -from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import * - - -class CubicCorrelation(Correlation): - def c(self, x, s, params, dt=False, dx=False): - zeta_matrix, dtheta_derivs, dx_derivs = Correlation.derivatives( - x_=x, s_=s, params=params - ) - # Initial matrices containing derivates for all values in array. Note since - # dtheta_s and dx_s already accounted for where derivative should be zero, all - # that must be done is multiplying the |dij| or thetaj matrix on top of a - # matrix of derivates w.r.t zeta (in this case, dzeta = -6zeta+6zeta**2) - drdt = (-6 * zeta_matrix + 6 * zeta_matrix ** 2) * dtheta_derivs - drdx = (-6 * zeta_matrix + 6 * zeta_matrix ** 2) * dx_derivs - # Also, create matrix for values of equation, 1 - 3zeta**2 + 2zeta**3, for loop - zeta_function_cubic = 1 - 3 * zeta_matrix ** 2 + 2 * zeta_matrix ** 3 - rx = np.prod(zeta_function_cubic, 2) - if dt: - # Same as previous example, loop over zeta matrix by shifting index - for i in range(len(params) - 1): - drdt = drdt * np.roll(zeta_function_cubic, i + 1, axis=2) - return rx, drdt - if dx: - # Same as previous example, loop over zeta matrix by shifting index - for i in range(len(params) - 1): - drdx = drdx * np.roll(zeta_function_cubic, i + 1, axis=2) - return rx, drdx - return rx diff --git a/src/UQpy/surrogates/kriging/correlation_models/ExponentialCorrelation.py b/src/UQpy/surrogates/kriging/correlation_models/ExponentialCorrelation.py deleted file mode 100644 index 94702760c..000000000 --- a/src/UQpy/surrogates/kriging/correlation_models/ExponentialCorrelation.py +++ /dev/null @@ -1,20 +0,0 @@ -from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import * - - -class ExponentialCorrelation(Correlation): - def c(self, x, s, params, dt=False, dx=False): - stack = Correlation.check_samples_and_return_stack(x, s) - rx = np.exp(np.sum(-params * abs(stack), axis=2)) - if dt: - drdt = -abs(stack) * np.transpose( - np.tile(rx, (np.size(x, 1), 1, 1)), (1, 2, 0) - ) - return rx, drdt - if dx: - drdx = ( - -params - * np.sign(stack) - * np.transpose(np.tile(rx, (np.size(x, 1), 1, 1)), (1, 2, 0)) - ) - return rx, drdx - return rx diff --git 
a/src/UQpy/surrogates/kriging/correlation_models/GaussianCorrelation.py b/src/UQpy/surrogates/kriging/correlation_models/GaussianCorrelation.py deleted file mode 100644 index 05ce09830..000000000 --- a/src/UQpy/surrogates/kriging/correlation_models/GaussianCorrelation.py +++ /dev/null @@ -1,21 +0,0 @@ -from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import * - - -class GaussianCorrelation(Correlation): - def c(self, x, s, params, dt=False, dx=False): - stack = Correlation.check_samples_and_return_stack(x, s) - rx = np.exp(np.sum(-params * (stack ** 2), axis=2)) - if dt: - drdt = -(stack ** 2) * np.transpose( - np.tile(rx, (np.size(x, 1), 1, 1)), (1, 2, 0) - ) - return rx, drdt - if dx: - drdx = ( - -2 - * params - * stack - * np.transpose(np.tile(rx, (np.size(x, 1), 1, 1)), (1, 2, 0)) - ) - return rx, drdx - return rx diff --git a/src/UQpy/surrogates/kriging/correlation_models/LinearCorrelation.py b/src/UQpy/surrogates/kriging/correlation_models/LinearCorrelation.py deleted file mode 100644 index 69d7f1506..000000000 --- a/src/UQpy/surrogates/kriging/correlation_models/LinearCorrelation.py +++ /dev/null @@ -1,35 +0,0 @@ -from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import * - - -class LinearCorrelation(Correlation): - def c(self, x, s, params, dt=False, dx=False): - stack = Correlation.check_samples_and_return_stack(x, s) - # Taking stack and turning each d value into 1-theta*dij - after_parameters = 1 - params * abs(stack) - # Define matrix of zeros to compare against (not necessary to be defined separately, - # but the line is bulky if this isn't defined first, and it is used more than once) - comp_zero = np.zeros((np.size(x, 0), np.size(s, 0), np.size(s, 1))) - # Compute matrix of max{0,1-theta*d} - max_matrix = np.maximum(after_parameters, comp_zero) - rx = np.prod(max_matrix, 2) - # Create matrix that has 1s where max_matrix is nonzero - # -Essentially, this acts as a way to store the indices of where the values are nonzero - ones_and_zeros = max_matrix.astype(bool).astype(int) - # Set initial derivatives as if all were positive - first_dtheta = -abs(stack) - first_dx = np.negative(params) * np.sign(stack) - # Multiply derivs by ones_and_zeros...this will set the values where the - # derivative should be zero to zero, and keep all other values the same - drdt = np.multiply(first_dtheta, ones_and_zeros) - drdx = np.multiply(first_dx, ones_and_zeros) - if dt: - # Loop over parameters, shifting max_matrix and multiplying over derivative matrix with each iter - for i in range(len(params) - 1): - drdt = drdt * np.roll(max_matrix, i + 1, axis=2) - return rx, drdt - if dx: - # Loop over parameters, shifting max_matrix and multiplying over derivative matrix with each iter - for i in range(len(params) - 1): - drdx = drdx * np.roll(max_matrix, i + 1, axis=2) - return rx, drdx - return rx diff --git a/src/UQpy/surrogates/kriging/correlation_models/SphericalCorrelation.py b/src/UQpy/surrogates/kriging/correlation_models/SphericalCorrelation.py deleted file mode 100644 index 1f6b8173d..000000000 --- a/src/UQpy/surrogates/kriging/correlation_models/SphericalCorrelation.py +++ /dev/null @@ -1,28 +0,0 @@ -from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import * - - -class SphericalCorrelation(Correlation): - def c(self, x, s, params, dt=False, dx=False): - zeta_matrix, dtheta_derivs, dx_derivs = Correlation.derivatives( - x_=x, s_=s, params=params - ) - # Initial matrices containing derivates for all values in array. 
Note since - # dtheta_s and dx_s already accounted for where derivative should be zero, all - # that must be done is multiplying the |dij| or thetaj matrix on top of a - # matrix of derivates w.r.t zeta (in this case, dzeta = -1.5+1.5zeta**2) - drdt = (-1.5 + 1.5 * zeta_matrix ** 2) * dtheta_derivs - drdx = (-1.5 + 1.5 * zeta_matrix ** 2) * dx_derivs - # Also, create matrix for values of equation, 1 - 1.5zeta + 0.5zeta**3, for loop - zeta_function = 1 - 1.5 * zeta_matrix + 0.5 * zeta_matrix ** 3 - rx = np.prod(zeta_function, 2) - if dt: - # Same as previous example, loop over zeta matrix by shifting index - for i in range(len(params) - 1): - drdt = drdt * np.roll(zeta_function, i + 1, axis=2) - return rx, drdt - if dx: - # Same as previous example, loop over zeta matrix by shifting index - for i in range(len(params) - 1): - drdx = drdx * np.roll(zeta_function, i + 1, axis=2) - return rx, drdx - return rx diff --git a/src/UQpy/surrogates/kriging/correlation_models/SplineCorrelation.py b/src/UQpy/surrogates/kriging/correlation_models/SplineCorrelation.py deleted file mode 100644 index 0aa6282d1..000000000 --- a/src/UQpy/surrogates/kriging/correlation_models/SplineCorrelation.py +++ /dev/null @@ -1,58 +0,0 @@ -from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import * - - -class SplineCorrelation(Correlation): - def c(self, x, s, params, dt=False, dx=False): - # x_, s_ = np.atleast_2d(x_), np.atleast_2d(s_) - # # Create stack matrix, where each block is x_i with all s - # stack = np.tile(np.swapaxes(np.atleast_3d(x_), 1, 2), (1, np.size(s_, 0), 1)) - np.tile(s_, ( - # np.size(x_, 0), - # 1, 1)) - stack = Correlation.check_samples_and_return_stack(x, s) - # In this case, the zeta value is just abs(stack)*parameters, no comparison - zeta_matrix = abs(stack) * params - # So, dtheta and dx are just |dj| and theta*sgn(dj), respectively - dtheta_derivs = abs(stack) - # dx_derivs = np.ones((np.size(x,0),np.size(s,0),np.size(s,1)))*parameters - dx_derivs = np.sign(stack) * params - - # Initialize empty sigma and dsigma matrices - sigma = np.ones( - (zeta_matrix.shape[0], zeta_matrix.shape[1], zeta_matrix.shape[2]) - ) - dsigma = np.ones( - (zeta_matrix.shape[0], zeta_matrix.shape[1], zeta_matrix.shape[2]) - ) - - # Loop over cases to create zeta_matrix and subsequent dR matrices - for i in range(zeta_matrix.shape[0]): - for j in range(zeta_matrix.shape[1]): - for k in range(zeta_matrix.shape[2]): - y = zeta_matrix[i, j, k] - if 0 <= y <= 0.2: - sigma[i, j, k] = 1 - 15 * y ** 2 + 30 * y ** 3 - dsigma[i, j, k] = -30 * y + 90 * y ** 2 - elif 0.2 < y < 1.0: - sigma[i, j, k] = 1.25 * (1 - y) ** 3 - dsigma[i, j, k] = 3.75 * (1 - y) ** 2 * -1 - elif y >= 1: - sigma[i, j, k] = 0 - dsigma[i, j, k] = 0 - - rx = np.prod(sigma, 2) - - if dt: - # Initialize derivative matrices incorporating chain rule - drdt = dsigma * dtheta_derivs - # Loop over to create proper matrices - for i in range(len(params) - 1): - drdt = drdt * np.roll(sigma, i + 1, axis=2) - return rx, drdt - if dx: - # Initialize derivative matrices incorporating chain rule - drdx = dsigma * dx_derivs - # Loop over to create proper matrices - for i in range(len(params) - 1): - drdx = drdx * np.roll(sigma, i + 1, axis=2) - return rx, drdx - return rx diff --git a/src/UQpy/surrogates/kriging/correlation_models/__init__.py b/src/UQpy/surrogates/kriging/correlation_models/__init__.py deleted file mode 100644 index 10f39dafc..000000000 --- a/src/UQpy/surrogates/kriging/correlation_models/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from 
UQpy.surrogates.kriging.correlation_models.baseclass import * -from UQpy.surrogates.kriging.correlation_models.CubicCorrelation import CubicCorrelation -from UQpy.surrogates.kriging.correlation_models.ExponentialCorrelation import ExponentialCorrelation -from UQpy.surrogates.kriging.correlation_models.GaussianCorrelation import GaussianCorrelation -from UQpy.surrogates.kriging.correlation_models.LinearCorrelation import LinearCorrelation -from UQpy.surrogates.kriging.correlation_models.SphericalCorrelation import SphericalCorrelation -from UQpy.surrogates.kriging.correlation_models.SplineCorrelation import SplineCorrelation diff --git a/src/UQpy/surrogates/kriging/correlation_models/baseclass/Correlation.py b/src/UQpy/surrogates/kriging/correlation_models/baseclass/Correlation.py deleted file mode 100644 index 703461b5f..000000000 --- a/src/UQpy/surrogates/kriging/correlation_models/baseclass/Correlation.py +++ /dev/null @@ -1,47 +0,0 @@ -from abc import ABC, abstractmethod -import numpy as np - - -class Correlation(ABC): - """ - Abstract base class of all Correlations. Serves as a template for creating new Kriging correlation - functions. - """ - - @abstractmethod - def c(self, x, s, params, dt=False, dx=False): - """ - Abstract method that needs to be implemented by the user when creating a new Correlation function. - """ - pass - - @staticmethod - def check_samples_and_return_stack(x, s): - x_, s_ = np.atleast_2d(x), np.atleast_2d(s) - # Create stack matrix, where each block is x_i with all s - stack = np.tile( - np.swapaxes(np.atleast_3d(x_), 1, 2), (1, np.size(s_, 0), 1) - ) - np.tile(s_, (np.size(x_, 0), 1, 1)) - return stack - - @staticmethod - def derivatives(x_, s_, params): - stack = Correlation.check_samples_and_return_stack(x_, s_) - # Taking stack and creating array of all thetaj*dij - after_parameters = params * abs(stack) - # Create matrix of all ones to compare - comp_ones = np.ones((np.size(x_, 0), np.size(s_, 0), np.size(s_, 1))) - # zeta_matrix has all values min{1,theta*dij} - zeta_matrix_ = np.minimum(after_parameters, comp_ones) - # Copy zeta_matrix to another matrix that will used to find where derivative should be zero - indices = zeta_matrix_.copy() - # If value of min{1,theta*dij} is 1, the derivative should be 0. 
- # So, replace all values of 1 with 0, then perform the .astype(bool).astype(int) - # operation like in the linear example, so you end up with an array of 1's where - # the derivative should be caluclated and 0 where it should be zero - indices[indices == 1] = 0 - # Create matrix of all |dij| (where non zero) to be used in calculation of dR/dtheta - dtheta_derivs_ = indices.astype(bool).astype(int) * abs(stack) - # Same as above, but for matrix of all thetaj where non-zero - dx_derivs_ = indices.astype(bool).astype(int) * params * np.sign(stack) - return zeta_matrix_, dtheta_derivs_, dx_derivs_ diff --git a/src/UQpy/surrogates/kriging/correlation_models/baseclass/__init__.py b/src/UQpy/surrogates/kriging/correlation_models/baseclass/__init__.py deleted file mode 100644 index e8cf1815d..000000000 --- a/src/UQpy/surrogates/kriging/correlation_models/baseclass/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from UQpy.surrogates.kriging.correlation_models.baseclass.Correlation import Correlation diff --git a/src/UQpy/surrogates/kriging/regression_models/ConstantRegression.py b/src/UQpy/surrogates/kriging/regression_models/ConstantRegression.py deleted file mode 100644 index 0e4f9e984..000000000 --- a/src/UQpy/surrogates/kriging/regression_models/ConstantRegression.py +++ /dev/null @@ -1,10 +0,0 @@ -import numpy as np -from UQpy.surrogates.kriging.regression_models.baseclass.Regression import Regression - - -class ConstantRegression(Regression): - def r(self, s): - s = np.atleast_2d(s) - fx = np.ones([np.size(s, 0), 1]) - jf = np.zeros([np.size(s, 0), np.size(s, 1), 1]) - return fx, jf diff --git a/src/UQpy/surrogates/kriging/regression_models/LinearRegression.py b/src/UQpy/surrogates/kriging/regression_models/LinearRegression.py deleted file mode 100644 index 118d8d73c..000000000 --- a/src/UQpy/surrogates/kriging/regression_models/LinearRegression.py +++ /dev/null @@ -1,12 +0,0 @@ -import numpy as np -from UQpy.surrogates.kriging.regression_models.baseclass.Regression import Regression - - -class LinearRegression(Regression): - def r(self, s): - s = np.atleast_2d(s) - fx = np.concatenate((np.ones([np.size(s, 0), 1]), s), 1) - jf_b = np.zeros([np.size(s, 0), np.size(s, 1), np.size(s, 1)]) - np.einsum("jii->ji", jf_b)[:] = 1 - jf = np.concatenate((np.zeros([np.size(s, 0), np.size(s, 1), 1]), jf_b), 2) - return fx, jf diff --git a/src/UQpy/surrogates/kriging/regression_models/QuadraticRegression.py b/src/UQpy/surrogates/kriging/regression_models/QuadraticRegression.py deleted file mode 100644 index fdddefbb5..000000000 --- a/src/UQpy/surrogates/kriging/regression_models/QuadraticRegression.py +++ /dev/null @@ -1,37 +0,0 @@ -import numpy as np -from UQpy.surrogates.kriging.regression_models.baseclass.Regression import Regression - - -class QuadraticRegression(Regression): - def r(self, s): - s = np.atleast_2d(s) - fx = np.zeros( - [np.size(s, 0), int((np.size(s, 1) + 1) * (np.size(s, 1) + 2) / 2)] - ) - jf = np.zeros( - [ - np.size(s, 0), - np.size(s, 1), - int((np.size(s, 1) + 1) * (np.size(s, 1) + 2) / 2), - ] - ) - for i in range(np.size(s, 0)): - temp = np.hstack((1, s[i, :])) - for j in range(np.size(s, 1)): - temp = np.hstack((temp, s[i, j] * s[i, j::])) - fx[i, :] = temp - # definie H matrix - h_ = 0 - for j in range(np.size(s, 1)): - tmp_ = s[i, j] * np.eye(np.size(s, 1)) - t1 = np.zeros([np.size(s, 1), np.size(s, 1)]) - t1[j, :] = s[i, :] - tmp = tmp_ + t1 - if j == 0: - h_ = tmp[:, j::] - else: - h_ = np.hstack((h_, tmp[:, j::])) - jf[i, :, :] = np.hstack( - (np.zeros([np.size(s, 1), 1]), 
np.eye(np.size(s, 1)), h_) - ) - return fx, jf diff --git a/src/UQpy/surrogates/kriging/regression_models/__init__.py b/src/UQpy/surrogates/kriging/regression_models/__init__.py deleted file mode 100644 index e6da265b3..000000000 --- a/src/UQpy/surrogates/kriging/regression_models/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from UQpy.surrogates.kriging.regression_models.baseclass import * -from UQpy.surrogates.kriging.regression_models.ConstantRegression import ConstantRegression -from UQpy.surrogates.kriging.regression_models.LinearRegression import LinearRegression -from UQpy.surrogates.kriging.regression_models.QuadraticRegression import QuadraticRegression diff --git a/src/UQpy/surrogates/kriging/regression_models/baseclass/Regression.py b/src/UQpy/surrogates/kriging/regression_models/baseclass/Regression.py deleted file mode 100644 index 3d435e51d..000000000 --- a/src/UQpy/surrogates/kriging/regression_models/baseclass/Regression.py +++ /dev/null @@ -1,14 +0,0 @@ -from abc import ABC, abstractmethod - - -class Regression(ABC): - """ - Abstract base class of all Regressions. Serves as a template for creating new Kriging regression - functions. - """ - @abstractmethod - def r(self, s): - """ - Abstract method that needs to be implemented by the user when creating a new Regression function. - """ - pass diff --git a/src/UQpy/surrogates/kriging/regression_models/baseclass/__init__.py b/src/UQpy/surrogates/kriging/regression_models/baseclass/__init__.py deleted file mode 100644 index 004e95e23..000000000 --- a/src/UQpy/surrogates/kriging/regression_models/baseclass/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from UQpy.surrogates.kriging.regression_models.baseclass.Regression import Regression diff --git a/tests/unit_tests/sampling/test_adaptive_kriging.py b/tests/unit_tests/sampling/test_adaptive_kriging.py index ef932fb7f..ab0c28714 100644 --- a/tests/unit_tests/sampling/test_adaptive_kriging.py +++ b/tests/unit_tests/sampling/test_adaptive_kriging.py @@ -3,7 +3,6 @@ from UQpy.run_model.model_execution.PythonModel import PythonModel from UQpy.utilities.MinimizeOptimizer import MinimizeOptimizer -from UQpy.surrogates.kriging.Kriging import Kriging from UQpy.sampling import MonteCarloSampling, AdaptiveKriging from UQpy.run_model.RunModel import RunModel from UQpy.distributions.collection import Normal diff --git a/tests/unit_tests/sampling/test_refined_stratified.py b/tests/unit_tests/sampling/test_refined_stratified.py index 15a3ccd12..6915add90 100644 --- a/tests/unit_tests/sampling/test_refined_stratified.py +++ b/tests/unit_tests/sampling/test_refined_stratified.py @@ -9,7 +9,6 @@ from UQpy.sampling.stratified_sampling.refinement.RandomRefinement import * from UQpy.sampling.stratified_sampling.strata.VoronoiStrata import * from UQpy.run_model.RunModel import * -from UQpy.surrogates.kriging.Kriging import Kriging def test_rss_simple_rectangular(): diff --git a/tests/unit_tests/surrogates/test_kriging.py b/tests/unit_tests/surrogates/test_kriging.py deleted file mode 100644 index d25084b02..000000000 --- a/tests/unit_tests/surrogates/test_kriging.py +++ /dev/null @@ -1,198 +0,0 @@ -import pytest -from UQpy.utilities.MinimizeOptimizer import MinimizeOptimizer -from beartype.roar import BeartypeCallHintPepParamException - -from UQpy.surrogates.kriging.Kriging import Kriging -import numpy as np -from UQpy.surrogates.kriging.regression_models import LinearRegression, ConstantRegression -from UQpy.surrogates.kriging.correlation_models import GaussianCorrelation - - -samples = np.linspace(0, 5, 
20).reshape(-1, 1)
-values = np.cos(samples)
-optimizer = MinimizeOptimizer(method="L-BFGS-B")
-krig = Kriging(regression_model=LinearRegression(), correlation_model=GaussianCorrelation(), optimizer=optimizer,
-               correlation_model_parameters=[0.14], optimize=False, random_state=1)
-krig.fit(samples=samples, values=values, correlation_model_parameters=[0.3])
-
-optimizer = MinimizeOptimizer(method="L-BFGS-B")
-krig2 = Kriging(regression_model=ConstantRegression(), correlation_model=GaussianCorrelation(), optimizer=optimizer,
-                correlation_model_parameters=[0.3], bounds=[[0.01, 5]],
-                optimize=False, optimizations_number=100, normalize=False,
-                random_state=2)
-krig2.fit(samples=samples, values=values)
-
-
-# Using the in-built linear regression model as a function
-linear_regression_model = Kriging(regression_model=LinearRegression(), correlation_model=GaussianCorrelation(), optimizer=optimizer,
-                                  correlation_model_parameters=[1]).regression_model
-optimizer = MinimizeOptimizer(method="L-BFGS-B")
-gaussian_corrleation_model = Kriging(regression_model=LinearRegression(), correlation_model=GaussianCorrelation(), optimizer=optimizer,
-                                     correlation_model_parameters=[1]).correlation_model
-
-optimizer = MinimizeOptimizer(method="L-BFGS-B")
-krig3 = Kriging(regression_model=linear_regression_model, correlation_model=gaussian_corrleation_model,
-                optimizer=optimizer,
-                correlation_model_parameters=[1], optimize=False, normalize=False, random_state=0)
-krig3.fit(samples=samples, values=values)
-
-
-def test_predict():
-    prediction = np.round(krig.predict([[1], [np.pi/2], [np.pi]], True), 3)
-    expected_prediction = np.array([[0.54, 0., -1.], [0., 0., 0.]])
-    assert (expected_prediction == prediction).all()
-
-
-def test_predict1():
-    prediction = np.round(krig2.predict([[1], [2*np.pi], [np.pi]], True), 3)
-    expected_prediction = np.array([[0.54, 1.009, -1.], [0., 0.031, 0.]])
-    assert (expected_prediction == prediction).all()
-
-
-def test_predict2():
-    prediction = np.round(krig3.predict([[1], [np.pi/2], [np.pi]]), 3)
-    expected_prediction = np.array([[0.54, -0., -1.]])
-    assert (expected_prediction == prediction).all()
-
-
-def test_jacobian():
-    jacobian = np.round(krig.jacobian([[np.pi], [np.pi/2]]), 3)
-    expected_jacobian = np.array([-0., -1.])
-    assert (expected_jacobian == jacobian).all()
-
-
-def test_jacobian1():
-    jacobian = np.round(krig3.jacobian([[np.pi], [np.pi/2]]), 3)
-    expected_jacobian = np.array([0., -1.])
-    assert (expected_jacobian == jacobian).all()
-
-
-def test_regression_models():
-    from UQpy.surrogates.kriging.regression_models import ConstantRegression, LinearRegression, QuadraticRegression
-    krig.regression_model = ConstantRegression()
-    tmp = krig.regression_model.r([[0], [1]])
-    tmp_test1 = (tmp[0] == np.array([[1.], [1.]])).all() and (tmp[1] == np.array([[[0.]], [[0.]]])).all()
-
-    krig.regression_model = LinearRegression()
-    tmp = krig.regression_model.r([[0], [1]])
-    tmp_test2 = (tmp[0] == (np.array([[1., 0.], [1., 1.]]))).all() and \
-                (tmp[1] == np.array([[[0., 1.]], [[0., 1.]]])).all()
-
-    krig.regression_model = QuadraticRegression()
-    tmp = krig.regression_model.r([[-1, 1], [2, -0.5]])
-    tmp_test3 = (tmp[0] == np.array([[1., -1., 1., 1., -1., 1.], [1., 2., -0.5, 4., -1., 0.25]])).all() and \
-                (tmp[1] == np.array([[[0., 1., 0., -2., 1., 0.],
-                                      [0., 0., 1., 0., -1., 2.]],
-                                     [[0., 1., 0., 4., -0.5, 0.],
-                                      [0., 0., 1., 0., 2., -1.]]])).all()
-
-    assert tmp_test1 and tmp_test2 and tmp_test3
-
-
-def test_correlation_models():
-    from UQpy.surrogates.kriging.correlation_models import ExponentialCorrelation, LinearCorrelation, SphericalCorrelation, CubicCorrelation, SplineCorrelation
-    krig.correlation_model = ExponentialCorrelation()
-    rx_exponential = (np.round(krig.correlation_model.c([[0], [1], [2]], [[2]], np.array([1])), 3) ==
-                      np.array([[0.135], [0.368], [1.]])).all()
-    drdt_exponential = (np.round(krig.correlation_model.c([[0], [1], [2]], [[2]], np.array([1]), dt=True)[1], 3) ==
-                        np.array([[[-0.271]], [[-0.368]], [[0.]]])).all()
-    drdx_exponential = (np.round(krig.correlation_model.c([[0], [1], [2]], [[2]], np.array([1]), dx=True)[1], 3) ==
-                        np.array([[[0.135]], [[0.368]], [[0.]]])).all()
-    expon = rx_exponential and drdt_exponential and drdx_exponential
-
-    krig.correlation_model = LinearCorrelation()
-    rx_linear = (np.round(krig.correlation_model.c([[0], [1], [2]], [[2]], np.array([1])), 3) ==
-                 np.array([[0.], [0.], [1.]])).all()
-    drdt_linear = (np.round(krig.correlation_model.c([[0.4], [0.5], [0.6]], [[0.5]], np.array([1]), dt=True)[1], 3) ==
-                   np.array([[[-0.1]], [[-0.]], [[-0.1]]])).all()
-    drdx_linear = (np.round(krig.correlation_model.c([[0.4], [0.5], [0.6]], [[0.5]], np.array([1]), dx=True)[1], 3) ==
-                   np.array([[[1.]], [[-0.]], [[-1.]]])).all()
-    linear = rx_linear and drdt_linear and drdx_linear
-
-    krig.correlation_model = SphericalCorrelation()
-    rx_spherical = (np.round(krig.correlation_model.c([[0], [1], [2]], [[2]], np.array([1])), 3) ==
-                    np.array([[0.], [0.], [1.]])).all()
-    drdt_spherical = (np.round(krig.correlation_model.c([[0.4], [0.5], [0.6]], [[0.5]], np.array([1]), dt=True)[1], 3)
-                      == np.array([[[-0.148]], [[-0.]], [[-0.148]]])).all()
-    drdx_spherical = (np.round(krig.correlation_model.c([[0.4], [0.5], [0.6]], [[0.5]], np.array([1]), dx=True)[1], 3)
-                      == np.array([[[1.485]], [[-0.]], [[-1.485]]])).all()
-    spherical = rx_spherical and drdt_spherical and drdx_spherical
-
-    krig.correlation_model = CubicCorrelation()
-    rx_cubic = (np.round(krig.correlation_model.c([[0.2], [0.5], [1]], [[0.5]], np.array([1])), 3) ==
-                np.array([[0.784], [1.], [0.5]])).all()
-    drdt_cubic = (np.round(krig.correlation_model.c([[0.4], [0.5], [0.6]], [[0.5]], np.array([1]), dt=True)[1], 3) ==
-                  np.array([[[-0.054]], [[0.]], [[-0.054]]])).all()
-    drdx_cubic = (np.round(krig.correlation_model.c([[0.4], [0.5], [0.6]], [[0.5]], np.array([1]), dx=True)[1], 3) ==
-                  np.array([[[0.54]], [[0.]], [[-0.54]]])).all()
-    cubic = rx_cubic and drdt_cubic and drdx_cubic
-
-    krig.correlation_model = SplineCorrelation()
-    rx_spline = (np.round(krig.correlation_model.c([[0.2], [0.5], [1]], [[0.5]], np.array([1])), 3) ==
-                 np.array([[0.429], [1.], [0.156]])).all()
-    drdt_spline = (np.round(krig.correlation_model.c([[0.4], [0.5], [0.6]], [[0.5]], np.array([1]), dt=True)[1], 3) ==
-                   np.array([[[-0.21]], [[0.]], [[-0.21]]])).all()
-    drdx_spline = (np.round(krig.correlation_model.c([[0.4], [0.5], [0.6]], [[0.5]], np.array([1]), dx=True)[1], 3) ==
-                   np.array([[[2.1]], [[0.]], [[-2.1]]])).all()
-    spline = rx_spline and drdt_spline and drdx_spline
-
-    assert expon and linear and spherical and cubic and spline
-
-
-def test_wrong_regression_model():
-    """
-    Raises an error if reg_model is not callable or a string of an in-built model.
-    """
-    with pytest.raises(BeartypeCallHintPepParamException):
-        Kriging(regression_model='A', correlation_model=GaussianCorrelation(), correlation_model_parameters=[1])
-
-
-def test_wrong_correlation_model():
-    """
-    Raises an error if corr_model is not callable or a string of an in-built model.
-    """
-    with pytest.raises(BeartypeCallHintPepParamException):
-        Kriging(regression_model=LinearRegression(), correlation_model='A', correlation_model_parameters=[1])
-
-
-def test_missing_correlation_model_parameters():
-    """
-    Raises an error if corr_model_params is not defined.
-    """
-    with pytest.raises(TypeError):
-        Kriging(regression_model=LinearRegression(), correlation_model=GaussianCorrelation(), bounds=[[0.01, 5]],
-                optimizations_number=100, random_state=1)
-
-
-def test_optimizer():
-    """
-    Raises an error if corr_model_params is not defined.
-    """
-    with pytest.raises(ValueError):
-        Kriging(regression_model=LinearRegression(), correlation_model=GaussianCorrelation(),
-                correlation_model_parameters=[1], optimizer='A')
-
-
-def test_random_state():
-    """
-    Raises an error if type of random_state is not correct.
-    """
-    with pytest.raises(BeartypeCallHintPepParamException):
-        Kriging(regression_model=LinearRegression(), correlation_model=GaussianCorrelation(),
-                correlation_model_parameters=[1], random_state='A')
-
-
-def test_log_likelihood():
-    prediction = np.round(krig3.log_likelihood(np.array([1.5]),
-                                               krig3.correlation_model, np.array([[1], [2]]),
-                                               np.array([[1], [1]]), np.array([[1], [2]]), return_grad=False), 3)
-    expected_prediction = 1.679
-    assert (expected_prediction == prediction).all()
-
-
-def test_log_likelihood_derivative():
-    prediction = np.round(krig3.log_likelihood(np.array([1.5]), krig3.correlation_model, np.array([[1], [2]]),
-                                               np.array([[1], [1]]), np.array([[1], [2]]), return_grad=True)[1], 3)
-    expected_prediction = np.array([-0.235])
-    assert (expected_prediction == prediction).all()
-

From 08b5f33f821a270361aeb8666be4dc7b95572dd8 Mon Sep 17 00:00:00 2001
From: Dimitris Tsapetis
Date: Mon, 21 Nov 2022 12:25:17 -0500
Subject: [PATCH 80/88] Fixes AKMCS tests

---
 src/UQpy/utilities/MinimizeOptimizer.py       |   4 +-
 .../sampling/test_adaptive_kriging.py         | 152 ++++++++----------
 2 files changed, 72 insertions(+), 84 deletions(-)

diff --git a/src/UQpy/utilities/MinimizeOptimizer.py b/src/UQpy/utilities/MinimizeOptimizer.py
index aa76a99ec..d15904b01 100644
--- a/src/UQpy/utilities/MinimizeOptimizer.py
+++ b/src/UQpy/utilities/MinimizeOptimizer.py
@@ -24,11 +24,11 @@ def optimize(self, function, initial_guess, args=(), jac=False):
             return minimize(function, initial_guess, args=args,
                             method=self.method, bounds=self._bounds,
                             constraints=self.constraints, jac=jac,
-                            options={'disp': True, 'maxiter': 10000, 'catol': 0.002})
+                            options={'disp': False, 'maxiter': 10000, 'catol': 0.002})
         else:
             return minimize(function, initial_guess, args=args,
                             method=self.method, bounds=self._bounds, jac=jac,
-                            options={'disp': True, 'maxiter': 10000, 'catol': 0.002})
+                            options={'disp': False, 'maxiter': 10000, 'catol': 0.002})
 
     def apply_constraints(self, constraints):
         if self.method.lower() in ['cobyla', 'slsqp', 'trust-constr']:
diff --git a/tests/unit_tests/sampling/test_adaptive_kriging.py b/tests/unit_tests/sampling/test_adaptive_kriging.py
index ab0c28714..73070dba5 100644
--- a/tests/unit_tests/sampling/test_adaptive_kriging.py
+++ b/tests/unit_tests/sampling/test_adaptive_kriging.py
@@ -1,5 +1,6 @@
 import pytest
 
+from UQpy import GaussianProcessRegression, RBF, LinearRegression
 from UQpy.run_model.model_execution.PythonModel import PythonModel
 from UQpy.utilities.MinimizeOptimizer import MinimizeOptimizer
 
@@ -7,172 +8,159 @@
 from UQpy.run_model.RunModel import RunModel
 from UQpy.distributions.collection import Normal
 from UQpy.sampling.adaptive_kriging_functions import *
-import shutil
 
 
 def test_akmcs_weighted_u():
-    from UQpy.surrogates.kriging.regression_models.LinearRegression import LinearRegression
-    from UQpy.surrogates.kriging.correlation_models.ExponentialCorrelation import ExponentialCorrelation
-
     marginals = [Normal(loc=0., scale=4.), Normal(loc=0., scale=4.)]
     x = MonteCarloSampling(distributions=marginals, nsamples=20, random_state=0)
     model = PythonModel(model_script='series.py', model_object_name="series")
     rmodel = RunModel(model=model)
-    regression_model = LinearRegression()
-    correlation_model = ExponentialCorrelation()
-    K = Kriging(regression_model=regression_model, correlation_model=correlation_model,
-                optimizer=MinimizeOptimizer('l-bfgs-b'),
-                optimizations_number=10, correlation_model_parameters=[1, 1], random_state=1)
+
+    kernel1 = RBF()
+    bounds_1 = [[10 ** (-4), 10 ** 3], [10 ** (-3), 10 ** 2], [10 ** (-3), 10 ** 2]]
+    optimizer1 = MinimizeOptimizer(method='L-BFGS-B', bounds=bounds_1)
+    gpr = GaussianProcessRegression(kernel=kernel1, hyperparameters=[1, 10 ** (-3), 10 ** (-2)], optimizer=optimizer1,
+                                    optimizations_number=10, noise=False, regression_model=LinearRegression(),
+                                    random_state=1)
     # OPTIONS: 'U', 'EFF', 'Weighted-U'
     learning_function = WeightedUFunction(weighted_u_stop=2)
-    a = AdaptiveKriging(distributions=marginals, runmodel_object=rmodel, surrogate=K,
-                        learning_nsamples=10**3, n_add=1, learning_function=learning_function,
+    a = AdaptiveKriging(distributions=marginals, runmodel_object=rmodel, surrogate=gpr,
+                        learning_nsamples=10 ** 3, n_add=1, learning_function=learning_function,
                         random_state=2)
     a.run(nsamples=25, samples=x.samples)
 
-    assert a.samples[23, 0] == 1.083176685073489
-    assert a.samples[20, 1] == 0.20293978126855253
-
+    assert a.samples[23, 0] == -0.48297825309989356
+    assert a.samples[20, 1] == 0.39006110248010434
 
 
 def test_akmcs_u():
-    from UQpy.surrogates.kriging.regression_models.LinearRegression import LinearRegression
-    from UQpy.surrogates.kriging.correlation_models.ExponentialCorrelation import ExponentialCorrelation
     marginals = [Normal(loc=0., scale=4.), Normal(loc=0., scale=4.)]
     x = MonteCarloSampling(distributions=marginals, nsamples=20, random_state=1)
     model = PythonModel(model_script='series.py', model_object_name="series")
     rmodel = RunModel(model=model)
-    regression_model = LinearRegression()
-    correlation_model = ExponentialCorrelation()
-    K = Kriging(regression_model=regression_model, correlation_model=correlation_model,
-                optimizer=MinimizeOptimizer('l-bfgs-b'),
-                optimizations_number=10, correlation_model_parameters=[1, 1], random_state=0)
+    kernel1 = RBF()
+    bounds_1 = [[10 ** (-4), 10 ** 3], [10 ** (-3), 10 ** 2], [10 ** (-3), 10 ** 2]]
+    optimizer1 = MinimizeOptimizer(method='L-BFGS-B', bounds=bounds_1)
+    gpr = GaussianProcessRegression(kernel=kernel1, hyperparameters=[1, 10 ** (-3), 10 ** (-2)], optimizer=optimizer1,
+                                    optimizations_number=10, noise=False, regression_model=LinearRegression(),
+                                    random_state=0)
     # OPTIONS: 'U', 'EFF', 'Weighted-U'
     learning_function = UFunction(u_stop=2)
-    a = AdaptiveKriging(distributions=marginals, runmodel_object=rmodel, surrogate=K,
-                        learning_nsamples=10**3, n_add=1, learning_function=learning_function,
+    a = AdaptiveKriging(distributions=marginals, runmodel_object=rmodel, surrogate=gpr,
+                        learning_nsamples=10 ** 3, n_add=1, learning_function=learning_function,
                         random_state=2)
     a.run(nsamples=25, samples=x.samples)
 
-    assert a.samples[23, 0] == -4.141979058326188
-    assert a.samples[20, 1] == -1.6476534435429009
-
+    assert a.samples[23, 0] == -3.781937137406927
+    assert a.samples[20, 1] == 0.17610325620498946
 
 
 def test_akmcs_expected_feasibility():
-    from UQpy.surrogates.kriging.regression_models.LinearRegression import LinearRegression
-    from UQpy.surrogates.kriging.correlation_models.ExponentialCorrelation import ExponentialCorrelation
     marginals = [Normal(loc=0., scale=4.), Normal(loc=0., scale=4.)]
     x = MonteCarloSampling(distributions=marginals, nsamples=20, random_state=1)
     model = PythonModel(model_script='series.py', model_object_name="series")
     rmodel = RunModel(model=model)
-    regression_model = LinearRegression()
-    correlation_model = ExponentialCorrelation()
-    K = Kriging(regression_model=regression_model, correlation_model=correlation_model,
-                optimizations_number=10, correlation_model_parameters=[1, 1],
-                optimizer=MinimizeOptimizer('l-bfgs-b'),)
+    kernel1 = RBF()
+    bounds_1 = [[10 ** (-4), 10 ** 3], [10 ** (-3), 10 ** 2], [10 ** (-3), 10 ** 2]]
+    optimizer1 = MinimizeOptimizer(method='L-BFGS-B', bounds=bounds_1)
+    gpr = GaussianProcessRegression(kernel=kernel1, hyperparameters=[1, 10 ** (-3), 10 ** (-2)], optimizer=optimizer1,
+                                    optimizations_number=20, noise=False, regression_model=LinearRegression(),
+                                    random_state=0)
     # OPTIONS: 'U', 'EFF', 'Weighted-U'
     learning_function = ExpectedFeasibility(eff_a=0, eff_epsilon=2, eff_stop=0.001)
-    a = AdaptiveKriging(distributions=marginals, runmodel_object=rmodel, surrogate=K,
-                        learning_nsamples=10**3, n_add=1, learning_function=learning_function,
+    a = AdaptiveKriging(distributions=marginals, runmodel_object=rmodel, surrogate=gpr,
+                        learning_nsamples=10 ** 3, n_add=1, learning_function=learning_function,
                         random_state=2)
     a.run(nsamples=25, samples=x.samples)
 
-    assert a.samples[23, 0] == 1.366058523912817
-    assert a.samples[20, 1] == -12.914668932772358
-
+    assert a.samples[23, 0] == 5.423754197908594
+    assert a.samples[20, 1] == 2.0355505295053384
 
 
 def test_akmcs_expected_improvement():
-    from UQpy.surrogates.kriging.regression_models.LinearRegression import LinearRegression
-    from UQpy.surrogates.kriging.correlation_models.ExponentialCorrelation import ExponentialCorrelation
     marginals = [Normal(loc=0., scale=4.), Normal(loc=0., scale=4.)]
     x = MonteCarloSampling(distributions=marginals, nsamples=20, random_state=1)
     model = PythonModel(model_script='series.py', model_object_name="series")
     rmodel = RunModel(model=model)
-    regression_model = LinearRegression()
-    correlation_model = ExponentialCorrelation()
-    K = Kriging(regression_model=regression_model, correlation_model=correlation_model,
-                optimizations_number=10, correlation_model_parameters=[1, 1],
-                optimizer=MinimizeOptimizer('l-bfgs-b'),)
+    kernel1 = RBF()
+    bounds_1 = [[10 ** (-4), 10 ** 3], [10 ** (-3), 10 ** 2], [10 ** (-3), 10 ** 2]]
+    optimizer1 = MinimizeOptimizer(method='L-BFGS-B', bounds=bounds_1)
+    gpr = GaussianProcessRegression(kernel=kernel1, hyperparameters=[1, 10 ** (-3), 10 ** (-2)], optimizer=optimizer1,
+                                    optimizations_number=50, noise=False, regression_model=LinearRegression(),
+                                    random_state=0)
     # OPTIONS: 'U', 'EFF', 'Weighted-U'
     learning_function = ExpectedImprovement()
-    a = AdaptiveKriging(distributions=marginals, runmodel_object=rmodel, surrogate=K,
-                        learning_nsamples=10**3, n_add=1, learning_function=learning_function,
+    a = AdaptiveKriging(distributions=marginals, runmodel_object=rmodel, surrogate=gpr,
+                        learning_nsamples=10 ** 3, n_add=1, learning_function=learning_function,
                         random_state=2)
     a.run(nsamples=25, samples=x.samples)
 
-    assert a.samples[23, 0] == 4.553078100499578
-    assert a.samples[20, 1] == -3.508949564718469
-
+    assert a.samples[21, 0] == 6.878734574049913
+    assert a.samples[20, 1] == -6.3410533857909215
 
 
 def test_akmcs_expected_improvement_global_fit():
-    from UQpy.surrogates.kriging.regression_models.LinearRegression import LinearRegression
-    from UQpy.surrogates.kriging.correlation_models.ExponentialCorrelation import ExponentialCorrelation
     marginals = [Normal(loc=0., scale=4.), Normal(loc=0., scale=4.)]
     x = MonteCarloSampling(distributions=marginals, nsamples=20, random_state=1)
     model = PythonModel(model_script='series.py', model_object_name="series")
     rmodel = RunModel(model=model)
-    regression_model = LinearRegression()
-    correlation_model = ExponentialCorrelation()
-    K = Kriging(regression_model=regression_model, correlation_model=correlation_model,
-                optimizations_number=10, correlation_model_parameters=[1, 1],
-                optimizer=MinimizeOptimizer('l-bfgs-b'),)
+    kernel1 = RBF()
+    bounds_1 = [[10 ** (-4), 10 ** 3], [10 ** (-3), 10 ** 2], [10 ** (-3), 10 ** 2]]
+    optimizer1 = MinimizeOptimizer(method='L-BFGS-B', bounds=bounds_1)
+    gpr = GaussianProcessRegression(kernel=kernel1, hyperparameters=[1, 10 ** (-3), 10 ** (-2)], optimizer=optimizer1,
+                                    optimizations_number=50, noise=False, regression_model=LinearRegression(),
+                                    random_state=0)
     # OPTIONS: 'U', 'EFF', 'Weighted-U'
     learning_function = ExpectedImprovementGlobalFit()
-    a = AdaptiveKriging(distributions=marginals, runmodel_object=rmodel, surrogate=K,
-                        learning_nsamples=10**3, n_add=1, learning_function=learning_function,
+    a = AdaptiveKriging(distributions=marginals, runmodel_object=rmodel, surrogate=gpr,
+                        learning_nsamples=10 ** 3, n_add=1, learning_function=learning_function,
                         random_state=2)
     a.run(nsamples=25, samples=x.samples)
 
-    assert a.samples[23, 0] == 11.939859785098493
-    assert a.samples[20, 1] == -8.429899469300118
+    assert a.samples[23, 0] == -10.24267076486663
+    assert a.samples[20, 1] == -11.419510366469687
 
 
 def test_akmcs_samples_error():
-    from UQpy.surrogates.kriging.regression_models.LinearRegression import LinearRegression
-    from UQpy.surrogates.kriging.correlation_models.ExponentialCorrelation import ExponentialCorrelation
     marginals = [Normal(loc=0., scale=4.), Normal(loc=0., scale=4.)]
     x = MonteCarloSampling(distributions=marginals, nsamples=20, random_state=0)
     model = PythonModel(model_script='series.py', model_object_name="series")
    rmodel = RunModel(model=model)
-    regression_model = LinearRegression()
-    correlation_model = ExponentialCorrelation()
-    K = Kriging(regression_model=regression_model, correlation_model=correlation_model,
-                optimizer=MinimizeOptimizer('l-bfgs-b'),
-                optimizations_number=10, correlation_model_parameters=[1, 1], random_state=1)
+    kernel1 = RBF()
+    bounds_1 = [[10 ** (-4), 10 ** 3], [10 ** (-3), 10 ** 2], [10 ** (-3), 10 ** 2]]
+    optimizer1 = MinimizeOptimizer(method='L-BFGS-B', bounds=bounds_1)
+    gpr = GaussianProcessRegression(kernel=kernel1, hyperparameters=[1, 10 ** (-3), 10 ** (-2)], optimizer=optimizer1,
+                                    optimizations_number=50, noise=False, regression_model=LinearRegression(),
+                                    random_state=0)
     # OPTIONS: 'U', 'EFF', 'Weighted-U'
     learning_function = WeightedUFunction(weighted_u_stop=2)
     with pytest.raises(NotImplementedError):
-        a = AdaptiveKriging(distributions=[Normal(loc=0., scale=4.)]*3, runmodel_object=rmodel, surrogate=K,
-                            learning_nsamples=10**3, n_add=1, learning_function=learning_function,
+        a = AdaptiveKriging(distributions=[Normal(loc=0., scale=4.)] * 3, runmodel_object=rmodel, surrogate=gpr,
+                            learning_nsamples=10 ** 3, n_add=1, learning_function=learning_function,
                             random_state=2, samples=x.samples)
 
 
 def test_akmcs_u_run_from_init():
-    from UQpy.surrogates.kriging.regression_models.LinearRegression import LinearRegression
-    from UQpy.surrogates.kriging.correlation_models.ExponentialCorrelation import ExponentialCorrelation
-
     marginals = [Normal(loc=0., scale=4.), Normal(loc=0., scale=4.)]
     x = MonteCarloSampling(distributions=marginals, nsamples=20, random_state=1)
     model = PythonModel(model_script='series.py', model_object_name="series")
     rmodel = RunModel(model=model)
-    regression_model = LinearRegression()
-    correlation_model = ExponentialCorrelation()
-    K = Kriging(regression_model=regression_model, correlation_model=correlation_model,
-                optimizer=MinimizeOptimizer('l-bfgs-b'),
-                optimizations_number=10, correlation_model_parameters=[1, 1], random_state=0)
+    kernel1 = RBF()
+    bounds_1 = [[10 ** (-4), 10 ** 3], [10 ** (-3), 10 ** 2], [10 ** (-3), 10 ** 2]]
+    optimizer1 = MinimizeOptimizer(method='L-BFGS-B', bounds=bounds_1)
+    gpr = GaussianProcessRegression(kernel=kernel1, hyperparameters=[1, 10 ** (-3), 10 ** (-2)], optimizer=optimizer1,
+                                    optimizations_number=100, noise=False, regression_model=LinearRegression(),
+                                    random_state=0)
     # OPTIONS: 'U', 'EFF', 'Weighted-U'
     learning_function = UFunction(u_stop=2)
-    a = AdaptiveKriging(distributions=marginals, runmodel_object=rmodel, surrogate=K,
-                        learning_nsamples=10**3, n_add=1, learning_function=learning_function,
+    a = AdaptiveKriging(distributions=marginals, runmodel_object=rmodel, surrogate=gpr,
+                        learning_nsamples=10 ** 3, n_add=1, learning_function=learning_function,
                         random_state=2, nsamples=25, samples=x.samples)
 
-    assert a.samples[23, 0] == -4.141979058326188
-    assert a.samples[20, 1] == -1.6476534435429009
+    assert a.samples[23, 0] == -3.781937137406927
+    assert a.samples[20, 1] == 0.17610325620498946

From 2818b256a9edf049914a2a0943d1f2db52f42af6 Mon Sep 17 00:00:00 2001
From: Dimitris Tsapetis
Date: Mon, 21 Nov 2022 14:08:51 -0500
Subject: [PATCH 81/88] Fixes RSS tests

---
 .../RefinedStratifiedSampling.py              |  2 +-
 .../refinement/baseclass/Refinement.py        |  2 +-
 .../sampling/test_adaptive_kriging.py         |  4 +-
 .../sampling/test_refined_stratified.py       | 45 ++++++++-----------
 4 files changed, 22 insertions(+), 31 deletions(-)

diff --git a/src/UQpy/sampling/stratified_sampling/RefinedStratifiedSampling.py b/src/UQpy/sampling/stratified_sampling/RefinedStratifiedSampling.py
index 6a233ea1e..e891651c1 100644
--- a/src/UQpy/sampling/stratified_sampling/RefinedStratifiedSampling.py
+++ b/src/UQpy/sampling/stratified_sampling/RefinedStratifiedSampling.py
@@ -45,7 +45,7 @@ def __init__(
 
         self.random_state = random_state
         if isinstance(self.random_state, int):
-            self.random_state = np.random.RandomState(self.random_state)
+            self.random_state = np.random.default_rng(self.random_state)
         elif not isinstance(self.random_state, (type(None), np.random.RandomState)):
             raise TypeError('UQpy: random_state must be None, an int or an np.random.Generator object.')
         if self.random_state is None:
diff --git a/src/UQpy/sampling/stratified_sampling/refinement/baseclass/Refinement.py b/src/UQpy/sampling/stratified_sampling/refinement/baseclass/Refinement.py
index 9e3f37f78..0ca80e523 100644
--- a/src/UQpy/sampling/stratified_sampling/refinement/baseclass/Refinement.py
+++ b/src/UQpy/sampling/stratified_sampling/refinement/baseclass/Refinement.py
@@ -46,7 +46,7 @@ def finalize(self, samples, samples_per_iteration):
     @staticmethod
     def identify_bins(strata_metrics, points_to_add, random_state):
         bins2break = np.array([])
         points_left = points_to_add
-        while (np.where(strata_metrics == strata_metrics.max())[0].shape[0] < points_left):
+        while np.where(strata_metrics == strata_metrics.max())[0].shape[0] < points_left:
             bin = np.where(strata_metrics == strata_metrics.max())[0]
             bins2break = np.hstack([bins2break, bin])
             strata_metrics[bin] = 0
diff --git a/tests/unit_tests/sampling/test_adaptive_kriging.py b/tests/unit_tests/sampling/test_adaptive_kriging.py
index 73070dba5..9e75f9003 100644
--- a/tests/unit_tests/sampling/test_adaptive_kriging.py
+++ b/tests/unit_tests/sampling/test_adaptive_kriging.py
@@ -52,7 +52,7 @@ def test_akmcs_u():
                         random_state=2)
     a.run(nsamples=25, samples=x.samples)
 
-    assert a.samples[23, 0] == -3.781937137406927
+    assert a.samples[23, 0] == 4.027342825480197
     assert a.samples[20, 1] == 0.17610325620498946
 
 
@@ -75,7 +75,7 @@ def test_akmcs_expected_feasibility():
                         random_state=2)
     a.run(nsamples=25, samples=x.samples)
 
-    assert a.samples[23, 0] == 5.423754197908594
+    assert a.samples[23, 0] == 4.553078100499578
     assert a.samples[20, 1] == 2.0355505295053384
 
 
diff --git a/tests/unit_tests/sampling/test_refined_stratified.py b/tests/unit_tests/sampling/test_refined_stratified.py
index 6915add90..7389d7101 100644
--- a/tests/unit_tests/sampling/test_refined_stratified.py
+++ b/tests/unit_tests/sampling/test_refined_stratified.py
@@ -1,6 +1,7 @@
 import pytest
 from beartype.roar import BeartypeCallHintPepParamException
 
+from UQpy import GaussianProcessRegression, RBF, LinearRegression
 from UQpy.run_model.model_execution.PythonModel import PythonModel
 from UQpy.utilities.MinimizeOptimizer import MinimizeOptimizer
 from UQpy.sampling.stratified_sampling.refinement.GradientEnhancedRefinement import GradientEnhancedRefinement
@@ -71,31 +72,22 @@ def test_rect_gerss():
     x = TrueStratifiedSampling(distributions=marginals, strata_object=strata, nsamples_per_stratum=1)
     model = PythonModel(model_script='python_model_function.py', model_object_name="y_func")
     rmodel = RunModel(model=model)
-    from UQpy.surrogates.kriging.regression_models import LinearRegression
-    from UQpy.surrogates.kriging.correlation_models import ExponentialCorrelation
-    K = Kriging(regression_model=LinearRegression(), correlation_model=ExponentialCorrelation(), optimizations_number=20, random_state=0,
-                correlation_model_parameters=[1, 1], optimizer=MinimizeOptimizer('l-bfgs-b'), )
-    K.fit(samples=x.samples, values=rmodel.qoi_list)
+    kernel1 = RBF()
+    bounds_1 = [[10 ** (-4), 10 ** 3], [10 ** (-3), 10 ** 2], [10 ** (-3), 10 ** 2]]
+    optimizer1 = MinimizeOptimizer(method='L-BFGS-B', bounds=bounds_1)
+    gpr = GaussianProcessRegression(kernel=kernel1, hyperparameters=[1, 10 ** (-3), 10 ** (-2)], optimizer=optimizer1,
+                                    optimizations_number=100, noise=False, regression_model=LinearRegression(),
+                                    random_state=0)
+    # gpr.fit(samples=x.samples, values=rmodel.qoi_list)
     refinement = GradientEnhancedRefinement(strata=x.strata_object, runmodel_object=rmodel,
-                                            surrogate=K, nearest_points_number=4)
+                                            surrogate=gpr, nearest_points_number=4)
     z = RefinedStratifiedSampling(stratified_sampling=x, random_state=2, refinement_algorithm=refinement)
     z.run(nsamples=6)
     assert np.allclose(z.samples, np.array([[0.417022, 0.36016225], [1.00011437, 0.15116629],
                                             [0.14675589, 0.5461693], [1.18626021, 0.67278036],
-                                            [1.51296312, 0.77483124], [0.74237455, 0.66026822]]))
-    # assert np.allclose(z.samples, np.array([[0.417022, 0.36016225], [1.00011437, 0.15116629],
-    #                                         [0.14675589, 0.5461693], [1.18626021, 0.67278036],
-    #                                         [1.59254104, 0.96577043], [1.97386531, 0.24237455]]))
-    # assert np.allclose(z.samples, np.array([[0.417022, 0.36016225], [1.00011437, 0.15116629],
-    #                                         [0.14675589, 0.5461693], [1.18626021, 0.67278036],
-    #                                         [1.59254104, 0.96577043], [1.7176612, 0.2101839]]))
-    # assert np.allclose(z.samplesU01, np.array([[0.208511, 0.36016225], [0.50005719, 0.15116629],
-    #                                            [0.07337795, 0.5461693], [0.59313011, 0.67278036],
-    #                                            [0.79627052, 0.96577043], [0.98693265, 0.24237455]]))
-    # assert np.allclose(z.samplesU01, np.array([[0.208511, 0.36016225], [0.50005719, 0.15116629],
-    #                                            [0.07337795, 0.5461693], [0.59313011, 0.67278036],
-    #                                            [0.79627052, 0.96577043], [0.8588306 , 0.2101839]]))
+                                            [1.64924557, 0.90711287],
+                                            [0.54595797, 0.30005026]]))
 
 
 def test_vor_rss():
@@ -123,19 +115,18 @@ def test_vor_gerss():
     marginals = [Uniform(loc=0., scale=2.), Uniform(loc=0., scale=1.)]
     strata_vor = VoronoiStrata(seeds_number=4, dimension=2, random_state=10)
     x_vor = TrueStratifiedSampling(distributions=marginals, strata_object=strata_vor, nsamples_per_stratum=1, )
-    from UQpy.surrogates.kriging.regression_models.LinearRegression import LinearRegression
-    from UQpy.surrogates.kriging.correlation_models.ExponentialCorrelation import ExponentialCorrelation
     model = PythonModel(model_script='python_model_function.py', model_object_name="y_func")
     rmodel = RunModel(model=model)
-    K_ = Kriging(regression_model=LinearRegression(), correlation_model=ExponentialCorrelation(), optimizations_number=20,
-                 optimizer=MinimizeOptimizer('l-bfgs-b'), random_state=0,
-                 correlation_model_parameters=[1, 1])
-
-    K_.fit(samples=x_vor.samples, values=rmodel.qoi_list)
+    kernel1 = RBF()
+    bounds_1 = [[10 ** (-4), 10 ** 3], [10 ** (-3), 10 ** 2], [10 ** (-3), 10 ** 2]]
+    optimizer1 = MinimizeOptimizer(method='L-BFGS-B', bounds=bounds_1)
+    gpr = GaussianProcessRegression(kernel=kernel1, hyperparameters=[1, 10 ** (-3), 10 ** (-2)], optimizer=optimizer1,
+                                    optimizations_number=100, noise=False, regression_model=LinearRegression(),
+                                    random_state=0)
     z_vor = RefinedStratifiedSampling(stratified_sampling=x_vor, nsamples=6, random_state=x_vor.random_state,
                                       refinement_algorithm=GradientEnhancedRefinement(strata=x_vor.strata_object,
                                                                                       runmodel_object=rmodel,
-                                                                                      surrogate=K_,
+                                                                                      surrogate=gpr,
                                                                                       nearest_points_number=4))
     assert np.allclose(z_vor.samples, np.array([[1.78345908, 0.01640854], [1.46201137, 0.70862104],
                                                 [0.4021338, 0.05290083], [0.1062376, 0.88958226],
                                                 [0.61246269, 0.47160095], [1.16609055, 0.30832536]]))
     assert np.allclose(z_vor.samplesU01, np.array([[0.89172954, 0.01640854], [0.73100569, 0.70862104],
                                                    [0.2010669, 0.05290083], [0.0531188, 0.88958226],
                                                    [0.30623134, 0.47160095], [0.58304527, 0.30832536]]))

From 38c96f502c99785550489d261ac74a3d4bf19ddf Mon Sep 17 00:00:00 2001
From: Dimitris Tsapetis
Date: Mon, 21 Nov 2022 15:27:53 -0500
Subject: [PATCH 82/88] Fixes RSS tests

---
 src/UQpy/sampling/SimplexSampling.py          |  2 +-
 .../sampling/test_adaptive_kriging.py         |  8 ++--
 .../sampling/test_refined_stratified.py       | 40 +++++++++----------
 3 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/src/UQpy/sampling/SimplexSampling.py b/src/UQpy/sampling/SimplexSampling.py
index 2b7426635..613bba256 100644
--- a/src/UQpy/sampling/SimplexSampling.py
+++ b/src/UQpy/sampling/SimplexSampling.py
@@ -10,7 +10,7 @@ def __init__(
         self,
         nodes: Union[list, Numpy2DFloatArray] = None,
         nsamples: PositiveInteger = None,
-        random_state: RandomStateType = None,
+        random_state: Union[RandomStateType, np.random.Generator] = None,
     ):
         """
         Generate uniform random samples inside an n-dimensional simplex.
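A note on the random_state changes in the two patches above: PATCH 81 starts wrapping an integer seed in np.random.default_rng (NumPy's Generator API) where the code previously built a legacy np.random.RandomState, and the SimplexSampling hunk above widens the accepted type to match. The two APIs draw different streams from the same integer seed, which is why the seed-pinned coordinates asserted in the surrounding tests keep shifting. A minimal illustration, assuming only NumPy's documented random APIs; the seed and draw sizes are arbitrary:

import numpy as np

# Legacy API: what an integer random_state used to be wrapped in.
legacy = np.random.RandomState(2)
a = legacy.random_sample(3)    # three uniform draws from the legacy stream

# Generator API: what an integer random_state becomes after PATCH 81.
modern = np.random.default_rng(2)
b = modern.random(3)           # same seed, a different stream

print(a)
print(b)  # the arrays differ, so hard-coded test asserts must be re-pinned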
diff --git a/tests/unit_tests/sampling/test_adaptive_kriging.py b/tests/unit_tests/sampling/test_adaptive_kriging.py
index 9e75f9003..4fcf3b1e6 100644
--- a/tests/unit_tests/sampling/test_adaptive_kriging.py
+++ b/tests/unit_tests/sampling/test_adaptive_kriging.py
@@ -43,7 +43,7 @@ def test_akmcs_u():
     bounds_1 = [[10 ** (-4), 10 ** 3], [10 ** (-3), 10 ** 2], [10 ** (-3), 10 ** 2]]
     optimizer1 = MinimizeOptimizer(method='L-BFGS-B', bounds=bounds_1)
     gpr = GaussianProcessRegression(kernel=kernel1, hyperparameters=[1, 10 ** (-3), 10 ** (-2)], optimizer=optimizer1,
-                                    optimizations_number=10, noise=False, regression_model=LinearRegression(),
+                                    optimizations_number=100, noise=False, regression_model=LinearRegression(),
                                     random_state=0)
     # OPTIONS: 'U', 'EFF', 'Weighted-U'
     learning_function = UFunction(u_stop=2)
@@ -52,7 +52,7 @@ def test_akmcs_u():
                         random_state=2)
     a.run(nsamples=25, samples=x.samples)
 
-    assert a.samples[23, 0] == 4.027342825480197
+    assert a.samples[23, 0] == -3.781937137406927
     assert a.samples[20, 1] == 0.17610325620498946
 
 
@@ -66,7 +66,7 @@ def test_akmcs_expected_feasibility():
     bounds_1 = [[10 ** (-4), 10 ** 3], [10 ** (-3), 10 ** 2], [10 ** (-3), 10 ** 2]]
     optimizer1 = MinimizeOptimizer(method='L-BFGS-B', bounds=bounds_1)
     gpr = GaussianProcessRegression(kernel=kernel1, hyperparameters=[1, 10 ** (-3), 10 ** (-2)], optimizer=optimizer1,
-                                    optimizations_number=20, noise=False, regression_model=LinearRegression(),
+                                    optimizations_number=100, noise=False, regression_model=LinearRegression(),
                                     random_state=0)
     # OPTIONS: 'U', 'EFF', 'Weighted-U'
     learning_function = ExpectedFeasibility(eff_a=0, eff_epsilon=2, eff_stop=0.001)
@@ -75,7 +75,7 @@ def test_akmcs_expected_feasibility():
                         random_state=2)
     a.run(nsamples=25, samples=x.samples)
 
-    assert a.samples[23, 0] == 4.553078100499578
+    assert a.samples[23, 0] == 5.423754197908594
     assert a.samples[20, 1] == 2.0355505295053384
 
 
diff --git a/tests/unit_tests/sampling/test_refined_stratified.py b/tests/unit_tests/sampling/test_refined_stratified.py
index 7389d7101..18b617cc7 100644
--- a/tests/unit_tests/sampling/test_refined_stratified.py
+++ b/tests/unit_tests/sampling/test_refined_stratified.py
@@ -23,10 +23,10 @@ def test_rss_simple_rectangular():
                                   samples_per_iteration=2,
                                   refinement_algorithm=algorithm,
                                   random_state=2)
-    assert y.samples[16, 0] == 0.06614276178462988
-    assert y.samples[16, 1] == 0.7836449863362334
-    assert y.samples[17, 0] == 0.1891972651582183
-    assert y.samples[17, 1] == 0.2961099664117288
+    assert y.samples[16, 0] == 0.22677821757428504
+    assert y.samples[16, 1] == 0.2729789855337742
+    assert y.samples[17, 0] == 0.07501256574570675
+    assert y.samples[17, 1] == 0.9321401317029486
 
 
 def test_rss_simple_voronoi():
@@ -40,10 +40,10 @@ def test_rss_simple_voronoi():
                                   samples_per_iteration=2,
                                   refinement_algorithm=algorithm,
                                   random_state=2)
-    assert np.round(y.samples[16, 0], 6) == 0.363793
-    assert np.round(y.samples[16, 1], 6) == 0.467625
-    assert np.round(y.samples[17, 0], 6) == 0.424586
-    assert np.round(y.samples[17, 1], 6) == 0.217301
+    assert np.round(y.samples[16, 0], 6) == 0.324738
+    assert np.round(y.samples[16, 1], 6) == 0.488029
+    assert np.round(y.samples[17, 0], 6) == 0.349367
+    assert np.round(y.samples[17, 1], 6) == 0.132426
 
 
 def test_rect_rss():
@@ -57,10 +57,10 @@ def test_rect_rss():
                                   refinement_algorithm=RandomRefinement(strata=strata))
     assert np.allclose(y.samples, np.array([[0.417022, 0.36016225], [1.00011437, 0.15116629],
                                             [0.14675589, 0.5461693], [1.18626021, 0.67278036],
-                                            [0.77483124, 0.7176612], [1.7101839, 0.66516741]]))
+                                            [1.90711287, 0.04595797], [0.80005026, 0.86428026]]))
     assert np.allclose(np.array(y.samplesU01), np.array([[0.208511, 0.36016225], [0.50005719, 0.15116629],
                                                          [0.07337795, 0.5461693], [0.59313011, 0.67278036],
-                                                         [0.38741562, 0.7176612], [0.85509195, 0.66516741]]))
+                                                         [0.95355644, 0.04595797], [0.40002513, 0.86428026]]))
 
 
 def test_rect_gerss():
@@ -123,17 +123,17 @@ def test_vor_gerss():
     gpr = GaussianProcessRegression(kernel=kernel1, hyperparameters=[1, 10 ** (-3), 10 ** (-2)], optimizer=optimizer1,
                                     optimizations_number=100, noise=False, regression_model=LinearRegression(),
                                     random_state=0)
-    z_vor = RefinedStratifiedSampling(stratified_sampling=x_vor, nsamples=6, random_state=x_vor.random_state,
+    z_vor = RefinedStratifiedSampling(stratified_sampling=x_vor, nsamples=6, random_state=0,
                                       refinement_algorithm=GradientEnhancedRefinement(strata=x_vor.strata_object,
                                                                                       runmodel_object=rmodel,
                                                                                       surrogate=gpr,
                                                                                       nearest_points_number=4))
     assert np.allclose(z_vor.samples, np.array([[1.78345908, 0.01640854], [1.46201137, 0.70862104],
                                                 [0.4021338, 0.05290083], [0.1062376, 0.88958226],
-                                                [0.61246269, 0.47160095], [1.16609055, 0.30832536]]))
+                                                [0.66730342, 0.46988084], [1.37411577, 0.39064685]]))
     assert np.allclose(z_vor.samplesU01, np.array([[0.89172954, 0.01640854], [0.73100569, 0.70862104],
                                                    [0.2010669, 0.05290083], [0.0531188, 0.88958226],
-                                                   [0.30623134, 0.47160095], [0.58304527, 0.30832536]]))
+                                                   [0.33365171, 0.46988084], [0.68705789, 0.39064685]]))
 
 
 def test_rss_random_state():
@@ -155,17 +155,17 @@ def test_rss_runmodel_object():
     marginals = [Uniform(loc=0., scale=2.), Uniform(loc=0., scale=1.)]
     strata = RectangularStrata(strata_number=[2, 2])
     x = TrueStratifiedSampling(distributions=marginals, strata_object=strata, nsamples_per_stratum=1, random_state=1)
-    from UQpy.surrogates.kriging.regression_models import LinearRegression
-    from UQpy.surrogates.kriging.correlation_models import ExponentialCorrelation
-
-    K = Kriging(regression_model=LinearRegression(), correlation_model=ExponentialCorrelation(), optimizations_number=20,
-                correlation_model_parameters=[1, 1], optimizer=MinimizeOptimizer('l-bfgs-b'), )
+    kernel1 = RBF()
+    bounds_1 = [[10 ** (-4), 10 ** 3], [10 ** (-3), 10 ** 2], [10 ** (-3), 10 ** 2]]
+    optimizer1 = MinimizeOptimizer(method='L-BFGS-B', bounds=bounds_1)
+    gpr = GaussianProcessRegression(kernel=kernel1, hyperparameters=[1, 10 ** (-3), 10 ** (-2)], optimizer=optimizer1,
+                                    optimizations_number=100, noise=False, regression_model=LinearRegression(),
+                                    random_state=0)
     model = PythonModel(model_script='python_model_function.py', model_object_name="y_func")
     rmodel = RunModel(model=model)
-    K.fit(samples=x.samples, values=rmodel.qoi_list)
     with pytest.raises(BeartypeCallHintPepParamException):
         refinement = GradientEnhancedRefinement(strata=x.strata_object, runmodel_object='abc',
-                                                surrogate=K)
+                                                surrogate=gpr)
         RefinedStratifiedSampling(stratified_sampling=x, samples_number=6, samples_per_iteration=2,
                                   refinement_algorithm=refinement)

From 1ed054000f8080d5e76d5abbbda8d944113709d7 Mon Sep 17 00:00:00 2001
From: Dimitris Tsapetis
Date: Mon, 21 Nov 2022 16:20:43 -0500
Subject: [PATCH 83/88] Fixes RSS tests

---
 tests/unit_tests/sampling/test_refined_stratified.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/unit_tests/sampling/test_refined_stratified.py b/tests/unit_tests/sampling/test_refined_stratified.py
index 18b617cc7..695bf8383 100644
--- a/tests/unit_tests/sampling/test_refined_stratified.py
+++ b/tests/unit_tests/sampling/test_refined_stratified.py
@@ -130,7 +130,7 @@ def test_vor_gerss():
                                                                                       nearest_points_number=4))
     assert np.allclose(z_vor.samples, np.array([[1.78345908, 0.01640854], [1.46201137, 0.70862104],
                                                 [0.4021338, 0.05290083], [0.1062376, 0.88958226],
-                                                [0.66730342, 0.46988084], [1.37411577, 0.39064685]]))
+                                                [0.66730342, 0.46988084], [1.5015904 , 0.97050966]]))
     assert np.allclose(z_vor.samplesU01, np.array([[0.89172954, 0.01640854], [0.73100569, 0.70862104],
                                                    [0.2010669, 0.05290083], [0.0531188, 0.88958226],
                                                    [0.33365171, 0.46988084], [0.68705789, 0.39064685]]))

From 32bdd299f9f9f94df7d96d4e1187a8e962549c62 Mon Sep 17 00:00:00 2001
From: Dimitris Tsapetis
Date: Mon, 21 Nov 2022 17:19:35 -0500
Subject: [PATCH 84/88] Fixes RSS tests

---
 tests/unit_tests/sampling/test_refined_stratified.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/unit_tests/sampling/test_refined_stratified.py b/tests/unit_tests/sampling/test_refined_stratified.py
index 695bf8383..13afea15f 100644
--- a/tests/unit_tests/sampling/test_refined_stratified.py
+++ b/tests/unit_tests/sampling/test_refined_stratified.py
@@ -133,7 +133,7 @@ def test_vor_gerss():
                                                 [0.66730342, 0.46988084], [1.5015904 , 0.97050966]]))
     assert np.allclose(z_vor.samplesU01, np.array([[0.89172954, 0.01640854], [0.73100569, 0.70862104],
                                                    [0.2010669, 0.05290083], [0.0531188, 0.88958226],
-                                                   [0.33365171, 0.46988084], [0.68705789, 0.39064685]]))
+                                                   [0.33365171, 0.46988084], [0.7507952 , 0.97050966]]))
 
 
 def test_rss_random_state():

From a5f9fc13edc9b4b54e5bbbf4209197495469cabf Mon Sep 17 00:00:00 2001
From: Dimitris Tsapetis
Date: Tue, 17 Jan 2023 15:10:15 -0500
Subject: [PATCH 85/88] Update requirements.txt

---
 requirements.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index 1386fa38c..4dc4412a3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,9 +7,9 @@ pytest == 6.1.2
 coverage == 5.3
 pytest-cov == 2.10.1
 pylint == 2.6.0
-wheel == 0.36.2
+wheel == 0.38.1
 pytest-azurepipelines == 0.8.0
 twine == 3.4.1
 pathlib~=1.0.1
 beartype ==0.9.1
-setuptools~=58.0.4
\ No newline at end of file
+setuptools~=65.5.1

From 5cc7e42fb19e711273d53a5a6470be3dbe6c7f73 Mon Sep 17 00:00:00 2001
From: Dimitris Tsapetis
Date: Tue, 17 Jan 2023 15:19:13 -0500
Subject: [PATCH 86/88] Adds option to increase gauss points also in Nataf
 initializer

---
 src/UQpy/transformations/Nataf.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/UQpy/transformations/Nataf.py b/src/UQpy/transformations/Nataf.py
index 90c3adf2c..5c45e7b53 100644
--- a/src/UQpy/transformations/Nataf.py
+++ b/src/UQpy/transformations/Nataf.py
@@ -32,6 +32,7 @@ def __init__(
         itam_threshold1: Union[float, int] = 0.001,
         itam_threshold2: Union[float, int] = 0.1,
         itam_max_iter: int = 100,
+        n_gauss_points: int = 1024
     ):
         """
         Transform random variables using the Nataf or Inverse Nataf transformation
@@ -63,6 +64,8 @@ def __init__(
            for iteration :math:`i`. Default: :math:`0.01`
        :param itam_max_iter: Maximum number of iterations for the ITAM method. Default: :math:`100`
+       :param n_gauss_points: The number of integration points used for the numerical integration of the
+        correlation matrix (:math:`\mathbf{C_Z}`) of the standard normal random vector **Z**
        """
        self.n_dimensions = 0
        if isinstance(distributions, list):
@@ -108,7 +111,7 @@ def __init__(
         elif all(isinstance(x, Normal) for x in distributions):
             self.corr_x = self.corr_z
         else:
-            self.corr_x = self.distortion_z2x(self.dist_object, self.corr_z, n_gauss_points=128)
+            self.corr_x = self.distortion_z2x(self.dist_object, self.corr_z, n_gauss_points=n_gauss_points)
 
         self.H: NumpyFloatArray = cholesky(self.corr_z, lower=True)
         """The lower triangular matrix resulting from the Cholesky decomposition of the correlation matrix

From 13c55395299fd53edb25070a7ee5b52f7b12fea1 Mon Sep 17 00:00:00 2001
From: Dimitris Tsapetis
Date: Tue, 17 Jan 2023 15:56:01 -0500
Subject: [PATCH 87/88] Disables execution of monte carlo example

---
 docs/code/sampling/monte_carlo/{plot_monte_carlo.py => monte_carlo.py} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename docs/code/sampling/monte_carlo/{plot_monte_carlo.py => monte_carlo.py} (100%)

diff --git a/docs/code/sampling/monte_carlo/plot_monte_carlo.py b/docs/code/sampling/monte_carlo/monte_carlo.py
similarity index 100%
rename from docs/code/sampling/monte_carlo/plot_monte_carlo.py
rename to docs/code/sampling/monte_carlo/monte_carlo.py

From bb06927f5d1d2a0443167f537e6ad39f8be16c9e Mon Sep 17 00:00:00 2001
From: Dimitris Tsapetis
Date: Tue, 17 Jan 2023 16:51:11 -0500
Subject: [PATCH 88/88] Changes default number of Nataf integration points

---
 src/UQpy/transformations/Nataf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/UQpy/transformations/Nataf.py b/src/UQpy/transformations/Nataf.py
index 5c45e7b53..66bd16c71 100644
--- a/src/UQpy/transformations/Nataf.py
+++ b/src/UQpy/transformations/Nataf.py
@@ -32,7 +32,7 @@ def __init__(
         itam_threshold1: Union[float, int] = 0.001,
         itam_threshold2: Union[float, int] = 0.1,
         itam_max_iter: int = 100,
-        n_gauss_points: int = 1024
+        n_gauss_points: int = 128
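A closing note on n_gauss_points: per the docstring added in PATCH 86, the parameter sets the number of points for the numerical integration that builds the correlation matrix C_Z, and PATCH 88 lowers the default from 1024 to 128. Because the integrand is a smooth bivariate normal density, a tensor-product Gauss-Legendre rule converges quickly and far fewer points usually suffice. A minimal sketch of that trade-off using only NumPy; the integrand, the correlation value, and the [-6, 6] truncation are illustrative stand-ins, not UQpy's internal distortion_z2x implementation:

import numpy as np

def gauss_legendre_2d(f, n_points, limit=6.0):
    # Tensor-product Gauss-Legendre quadrature on [-limit, limit]^2.
    nodes, weights = np.polynomial.legendre.leggauss(n_points)
    z = limit * nodes      # rescale nodes from [-1, 1] to [-limit, limit]
    w = limit * weights    # rescale weights by the same Jacobian
    z1, z2 = np.meshgrid(z, z)
    return np.sum(np.outer(w, w) * f(z1, z2))

rho = 0.5  # illustrative correlation of the standard normal pair (z1, z2)

def phi2(z1, z2):
    # Bivariate standard normal density; it integrates to ~1 over [-6, 6]^2.
    return np.exp(-(z1 ** 2 - 2 * rho * z1 * z2 + z2 ** 2) / (2 * (1 - rho ** 2))) / (
        2 * np.pi * np.sqrt(1 - rho ** 2))

for n in (8, 32, 128, 1024):
    print(n, gauss_legendre_2d(phi2, n))  # converges to 1 well before n = 1024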