From e859d3cad4c8ec905d520fb3dc3e6f3c0389b07f Mon Sep 17 00:00:00 2001
From: Dimitris Tsapetis
Date: Mon, 24 Apr 2023 11:43:48 -0400
Subject: [PATCH] Updates documentation files

---
 docs/code/sampling/theta_criterion/README.rst |   2 +
 .../theta_criterion/pce_theta_criterion.py    | 246 +++++++++++++++
 .../pce/plot_pce_theta_criterion.py           | 297 ------------------
 docs/source/conf.py                           |   2 +
 docs/source/sampling/index.rst                |   3 +
 docs/source/sampling/theta_criterion.rst      |  24 ++
 src/UQpy/sampling/ThetaCriterionPCE.py        |  26 +-
 7 files changed, 289 insertions(+), 311 deletions(-)
 create mode 100644 docs/code/sampling/theta_criterion/README.rst
 create mode 100644 docs/code/sampling/theta_criterion/pce_theta_criterion.py
 delete mode 100644 docs/code/surrogates/pce/plot_pce_theta_criterion.py
 create mode 100644 docs/source/sampling/theta_criterion.rst

diff --git a/docs/code/sampling/theta_criterion/README.rst b/docs/code/sampling/theta_criterion/README.rst
new file mode 100644
index 000000000..13ac8db64
--- /dev/null
+++ b/docs/code/sampling/theta_criterion/README.rst
@@ -0,0 +1,2 @@
Theta Criterion PCE Examples
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

diff --git a/docs/code/sampling/theta_criterion/pce_theta_criterion.py b/docs/code/sampling/theta_criterion/pce_theta_criterion.py
new file mode 100644
index 000000000..ef37babd4
--- /dev/null
+++ b/docs/code/sampling/theta_criterion/pce_theta_criterion.py
@@ -0,0 +1,246 @@
"""
Polynomial Chaos Expansion example: Active Learning for Multiple Surrogate Models
======================================================================================

In this example, we use active learning to construct an optimal experimental design with respect to exploration of
the design domain and exploitation of the given surrogate models in the form of Polynomial Chaos Expansions (PCE). The active learning is based on the :math:`\Theta` criterion recently proposed in

L. Novák, M. Vořechovský, V. Sadílek, M. D. Shields, *Variance-based adaptive sequential sampling for polynomial chaos expansion*, Computer Methods in Applied Mechanics and Engineering 386 (2021) 114105. doi:10.1016/j.cma.2021.114105.
"""

# %% md
# We start with the necessary imports.

# %%

import numpy as np
import matplotlib.pyplot as plt
from UQpy.sampling.ThetaCriterionPCE import *

from UQpy.distributions import Uniform, JointIndependent, Normal
from UQpy.surrogates import *

from UQpy.sampling import LatinHypercubeSampling
from UQpy.sampling.stratified_sampling.latin_hypercube_criteria import *


# %% md
# The example involves a :math:`2D` function with mirrored quarter-circle arc line singularities. The function is given by:
#
# .. math:: f(\mathbf{X})= \frac{1}{ \lvert 0.3-X_1^2 - X_2^2\rvert + \delta}- \frac{1}{ \lvert 0.3-(1-X_1)^2 - (1-X_2)^2\rvert + \delta}, \quad \mathbf{X} \sim \mathcal{U}[0,1]^2
#
# where the strength of the singularities is controlled by the parameter :math:`\delta`, which we set to :math:`\delta=0.01`.
#
#

# %%


def Model2DComplete(X, delta=0.01):
    Y = Model2D1(X) + Model2D2(X)
    return Y


# %% md
# In order to show the possibilities of active learning for multiple surrogate models, we split the function into two parts as follows:
#
# .. math:: f_1(\mathbf{X})= \begin{cases} \frac{1}{ \lvert 0.3-X_1^2 - X_2^2\rvert + \delta}-\frac{1}{ \lvert 0.3-(1-X_1)^2 - (1-X_2)^2\rvert + \delta} \quad \text{for} \quad X_1 \geq X_2,\\ 0 \quad \text{otherwise}, \end{cases}
#
# and the second part :math:`f_2(\mathbf{X})` is defined analogously, with the formula active for :math:`X_1 \leq X_2` and zero otherwise (see :code:`Model2D2` below).

# %%


def Model2D1(X, delta=0.01):
    M = X[:, 0] < X[:, 1]
    Y = 1 / (np.abs(0.3 - X[:, 0] ** 2 - X[:, 1] ** 2) + delta) - 1 / (
        np.abs(0.3 - (1 - X[:, 0]) ** 2 - (1 - X[:, 1]) ** 2) + delta)
    Y[M] = 0
    return Y


def Model2D2(X, delta=0.01):
    M = X[:, 0] > X[:, 1]
    Y = 1 / (np.abs(0.3 - X[:, 0] ** 2 - X[:, 1] ** 2) + delta) - 1 / (
        np.abs(0.3 - (1 - X[:, 0]) ** 2 - (1 - X[:, 1]) ** 2) + delta)
    Y[M] = 0
    return Y

# %% md
# The mathematical models have independent random inputs, which are uniformly distributed in the interval :math:`[0, 1]`.

# %%


# input distributions
dist1 = Uniform(loc=0, scale=1)
dist2 = Uniform(loc=0, scale=1)

marg = [dist1, dist2]
joint = JointIndependent(marginals=marg)

# %% md
# We must now select a polynomial basis. Here we opt for a total-degree (TD) basis, such that the univariate polynomials have a maximum degree equal to :math:`P` and all multivariate polynomials have a total degree (sum of the degrees of the corresponding univariate polynomials) at most equal to :math:`P`. The size of the basis is then given by
#
# .. math:: \frac{(N+P)!}{N! P!}
#
# where :math:`N` is the number of random inputs (here, :math:`N=2`); for the maximum degree :math:`P=10` used below, this yields :math:`66` basis functions.
#

# %%


# realizations of random inputs
# training data
# maximum polynomial degree
P = 10
# construct total-degree polynomial basis and use OLS for estimation of coefficients
polynomial_basis = TotalDegreeBasis(joint, P)

# %% md
# We must now compute the PCE coefficients. For that we first need a training sample of input random variable realizations and the corresponding model outputs. These two data sets form what is also known as an "experimental design" (ED). In the case of adaptive construction of the PCE by the best-model-selection algorithm, the size of the ED is given a priori and the most suitable basis functions are selected adaptively. Here we have two surrogate models sharing an identical training sample of the input random vector, together with the two corresponding sets of model outputs. This is a small initial ED, which will be further extended by the active learning algorithm.

# %%


# number of initial training samples
sample_size = 15

# realizations of input random vector
xx_train = joint.rvs(sample_size)

# corresponding model outputs
yy_train1 = Model2D1(xx_train)
yy_train2 = Model2D2(xx_train)

# %% md
# We now fit the PCE coefficients by solving a regression problem. Here we opt for the :code:`np.linalg.lstsq` method, which is based on the :code:`dgelsd` solver of LAPACK. The resulting full PCE is then used for the selection of the best basis functions: once the PCE containing all basis functions generated by the TD algorithm has been created, the number of basis functions can be reduced by the least angle regression (LAR) algorithm. We create sparse PCE approximations for both mathematical models as follows.
+ +# %% + + +least_squares = LeastSquareRegression() + +# fit model 1 +pce1 = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=least_squares) +pce1.fit(xx_train, yy_train1) +pceLAR1 = polynomial_chaos.regressions.LeastAngleRegression.model_selection(pce1) + +# fit model 2 +pce2 = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=least_squares) +pce2.fit(xx_train, yy_train2) +pceLAR2 = polynomial_chaos.regressions.LeastAngleRegression.model_selection(pce2) + +# %% md +# The active learning algorithm based on $\Theta$ criterion selects the best sample from given large set of candidates coverign uniformly the whole design domain. Here we set number of samples as :math:`n_{cand}=10^4` and use LHS-MaxiMin for sampling, though any sampling technique can be employed. + +# %% + + +# number of candidates for the active learning algorithm +n_cand = 10000 + +# MaxiMin LHS samples uniformly covering the whole input random space +lhs_maximin_cand = LatinHypercubeSampling(distributions=[dist1, dist2], + criterion=MaxiMin(metric=DistanceMetric.CHEBYSHEV), + nsamples=n_cand) + +# candidates for active learning +Xaptive = lhs_maximin_cand._samples + +# %% md +# Start of the active learning algorithm interatively selecting :math:`nsamples=400` samples one-by-one. Note that the class :code:`ThetaCriterionPCE` takes a list of surrogate models as input, here we have 2 PCEs approximated 2 mathematical models. The active learning further selects the best candidate in each run by variance-based :math:`\Theta` criterion. + +# %% + + +# total number of added points by the active learning algorithm +naddedsims = 400 + +# loading of existing ED for both PCEs +Xadapted = xx_train +Yadapted1 = yy_train1 +Yadapted2 = yy_train2 + +# adaptive algorithm and reconstruction of PCE + +for i in range(0, int(naddedsims)): + # create ThetaCriterion class for active learning + ThetaSampling = ThetaCriterionPCE([pceLAR1, pceLAR2]) + + # find the best candidate according to given criterium (variance and distance) + pos = ThetaSampling.run(Xadapted, Xaptive) + newpointX = np.array([Xaptive[pos, :]]) + + newpointres1 = Model2D1(newpointX) + newpointres2 = Model2D2(newpointX) + + # add the best candidate to experimental design + Xadapted = np.append(Xadapted, newpointX, axis=0) + Yadapted1 = np.r_[Yadapted1, newpointres1] + Yadapted2 = np.r_[Yadapted2, newpointres2] + + # reconstruct the PCE 1 + pce1.fit(Xadapted, Yadapted1) + pceLAR1 = polynomial_chaos.regressions.LeastAngleRegression.model_selection(pce1) + + # reconstruct the PCE 2 + pce2.fit(Xadapted, Yadapted2) + pceLAR2 = polynomial_chaos.regressions.LeastAngleRegression.model_selection(pce2) + + if i % 10 == 0: + print('\nNumber of added simulations:', i) + +# plot final ED +fig, ax_nstd = plt.subplots(figsize=(6, 6)) +ax_nstd.plot(Xadapted[:, 0], Xadapted[:, 1], 'ro', label='Adapted ED') +ax_nstd.plot(xx_train[:, 0], xx_train[:, 1], 'bo', label='Original ED') +ax_nstd.set_ylabel(r'$X_2$') +ax_nstd.set_xlabel(r'$X_1$') +ax_nstd.legend(loc='upper left'); + +# %% md +# For a comparison, we construct also a surrogate model of the full original mathematical model and further run active learning similarly as for the previous reduced models. Note that the final ED for this complete mathematical model should be almost identical as in the previous case with the two PCEs approximating reduced models. 
+ +# %% + + +yy_train3 = Model2DComplete(xx_train) +pce3 = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=least_squares) +pce3.fit(xx_train, yy_train3) +pceLAR3 = polynomial_chaos.regressions.LeastAngleRegression.model_selection(pce3) + + + +Xadapted3 = xx_train +Yadapted3 = yy_train3 + +# adaptive algorithm and reconstruction of PCE +for i in range(0, int(400)): + # create ThetaCriterion class for active learning + ThetaSampling_complete = ThetaCriterionPCE([pceLAR3]) + + # find the best candidate according to given criterium (variance and distance) + pos = ThetaSampling_complete.run(Xadapted3, Xaptive) + newpointX = np.array([Xaptive[pos, :]]) + newpointres = Model2DComplete(newpointX) + + # add the best candidate to experimental design + Xadapted3 = np.append(Xadapted3, newpointX, axis=0) + Yadapted3 = np.r_[Yadapted3, newpointres] + + pce3.fit(Xadapted3, Yadapted3) + pceLAR3 = polynomial_chaos.regressions.LeastAngleRegression.model_selection(pce3) + + if i % 10 == 0: + print('\nNumber of added simulations:', i) + +# plot final ED +fig, ax_nstd = plt.subplots(figsize=(6, 6)) +ax_nstd.plot(Xadapted3[:, 0], Xadapted3[:, 1], 'ro', label='Adapted ED') +ax_nstd.plot(xx_train[:, 0], xx_train[:, 1], 'bo', label='Original ED') +ax_nstd.set_ylabel(r'$X_2$') +ax_nstd.set_xlabel(r'$X_1$') +ax_nstd.legend(loc='upper left'); diff --git a/docs/code/surrogates/pce/plot_pce_theta_criterion.py b/docs/code/surrogates/pce/plot_pce_theta_criterion.py deleted file mode 100644 index 59bd76838..000000000 --- a/docs/code/surrogates/pce/plot_pce_theta_criterion.py +++ /dev/null @@ -1,297 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 - -# ### Polynomial Chaos Expansion example: Active Learning for Multiple Surrogate Models -# - -# Authors: Lukas Novak \ -# Date: April 14 2023 - -# In this example, we use active learning for construction of optimal experimental design with respect to exploration of the design domain and exploitation of given surrogate models in form of Polynomial Chaos Expansion (PCE). ACtive learning is based on $\Theta$ criterion recently proposed in -# -# L. Novák, M. Vořechovský, V. Sadílek, M. D. Shields, *Variance-based adaptive sequential sampling for polynomial chaos expansion*, 637 Computer Methods in Applied Mechanics and Engineering 386 (2021) 114105. doi:10.1016/j.cma.2021.114105. - -# We start with the necessary imports. - -# In[3]: - - -# import packages -import numpy as np -import matplotlib.pyplot as plt -from UQpy.sampling.ThetaCriterionPCE import * - -from UQpy.distributions import Uniform, JointIndependent, Normal -from UQpy.surrogates import * - -from UQpy.sampling import LatinHypercubeSampling -from UQpy.sampling.stratified_sampling.latin_hypercube_criteria import * - - -# The example involves a~$2$D function with mirrored quarter-circle arc line singularities \cite{NovVorSadShi:CMAME:21}. The form of the function is give by: -# \begin{equation} -# f(\mathbf{X})= \frac{1}{ \lvert 0.3-X_1^2 - X_2^2\rvert + \delta}- -# \frac{1}{ \lvert 0.3-(1-X_1)^2 - (1-X_2)^2\rvert + \delta}, \quad \mathbf{X} \sim \mathcal{U}[0,1]^2 -# , -# \end{equation} -# where the strength of the singularities is controlled by the parameter $\delta$, which we set as $\delta=0.01$. 
\ -# -# - -# In[2]: - - -def Model2DComplete(X,delta=0.01): - Y=Model2D1(X)+Model2D2(X) - return Y - - -# In order to show the possibilities of active learning for multiple surrogate models, we split the function into the two parts as follows: -# -# \begin{equation} -# f_1(\mathbf{X})= -# \begin{cases} -# \frac{1}{ \lvert 0.3-X_1^2 - X_2^2\rvert + \delta}- -# \frac{1}{ \lvert 0.3-(1-X_1)^2 - (1-X_2)^2\rvert + \delta} \quad \text{for} \quad X_1X_2\\ -# 0 \quad \text{otherwise} -# \end{cases}, -# \end{equation} - -# In[3]: - - -def Model2D1(X,delta=0.01): - M=X[:,0]X[:,1] - Y=1/(np.abs(0.3 - X[:, 0]**2 - X[:, 1]**2) + delta)-1/(np.abs(0.3 - (1 - X[:, 0])**2 - (1 - X[:, 1])**2) + delta) - Y[M]=0 - return Y - - -# The mathematical models have indepdent random inputs, which are uniformly distributed in interval $[0, 1]$. - -# In[4]: - - -# input distributions -dist1 = Uniform(loc=0, scale=1) -dist2 = Uniform(loc=0, scale=1) - -marg = [dist1, dist2] -joint = JointIndependent(marginals=marg) - - -# We must now select a polynomial basis. Here we opt for a total-degree (TD) basis, such that the univariate polynomials have a maximum degree equal to $P$ and all multivariate polynomial have a total-degree (sum of degrees of corresponding univariate polynomials) at most equal to $P$. The size of the basis is then given by -# $$\frac{(N+P)!}{N! P!},$$ -# where $N$ is the number of random inputs (here, $N=2$). -# -# -# - -# In[5]: - - -# realizations of random inputs -# training data -# maximum polynomial degree -P = 10 -# construct total-degree polynomial basis and use OLS for estimation of coefficients -polynomial_basis = TotalDegreeBasis(joint,P) - - -# We must now compute the PCE coefficients. For that we first need a training sample of input random variable realizations and the corresponding model outputs. These two data sets form what is also known as an ''experimental design''. In case of adaptive construction of PCE by the best model selection algorithm, size of ED is given apriori and the most suitable basis functions are adaptively selected. Here we have two surrogate models with identical training samples of input random vector and two sets of corresponding mathematical models. This ED represents small initial ED, which will be further extended by active learning algorithm. - -# In[6]: - - -# number of inital traning samples -sample_size = 15 - -# realizations of input random vector -xx_train = joint.rvs(sample_size) - -# corresponding model outputs -yy_train1 = Model2D1(xx_train) -yy_train2 = Model2D2(xx_train) - - -# We now fit the PCE coefficients by solving a regression problem. Here we opt for the _np.linalg.lstsq_ method, which is based on the _dgelsd_ solver of LAPACK. This original PCE class will be used for further selection of the best basis functions. Once we have created the PCE containing all basis functions generated by TD algorithm, it is possible to reduce the number of basis functions by LAR algorithm. We create sparse PCE approximations for both mathematical models as follows. 
- -# In[7]: - - - -least_squares = LeastSquareRegression() - -# fit model 1 -pce1 = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=least_squares) -pce1.fit(xx_train, yy_train1) -pceLAR1=polynomial_chaos.regressions.LeastAngleRegression.model_selection(pce1) - - -# fit model 2 -pce2 = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=least_squares) -pce2.fit(xx_train, yy_train2) -pceLAR2=polynomial_chaos.regressions.LeastAngleRegression.model_selection(pce2) - - -# The active learning algorithm based on $\Theta$ criterion selects the best sample from given large set of candidates coverign uniformly the whole design domain. Here we set number of samples as $n_{cand}=10^4$ and use LHS-MaxiMin for sampling, though any sampling technique can be employed. - -# In[8]: - - -# number of candidates for the active learning algorithm -n_cand=10000 - -# MaxiMin LHS samples uniformly covering the whole input random space -lhs_maximin_cand = LatinHypercubeSampling(distributions=[dist1, dist2], - criterion=MaxiMin(metric=DistanceMetric.CHEBYSHEV), - nsamples=n_cand) - -# candidates for active learning -Xaptive = lhs_maximin_cand._samples - - -# Start of the active learning algorithm interatively selecting $n_{add}=400$ samples one-by-one. Note that the class ThetaCriterionPCE takes a list of surrogate models as input, here we have 2 PCEs approximated 2 mathematical models. The active learning further selects the best candidate in each run by variance-based $\Theta$ criterion. - -# In[ ]: - - -# total number of added points by the active learning algorithm -naddedsims=400 - -# loading of existing ED for both PCEs -Xadapted=xx_train -Yadapted1=yy_train1 -Yadapted2=yy_train2 - - -# adaptive algorithm and reconstruction of PCE - -for i in range (0,int(naddedsims)): -# create ThetaCriterion class for active learning - ThetaSampling=ThetaCriterionPCE([pceLAR1,pceLAR2]) - -# find the best candidate according to given criterium (variance and distance) - pos=ThetaSampling.run(Xadapted,Xaptive) - newpointX=np.array([Xaptive[pos,:]]) - - newpointres1=Model2D1(newpointX) - newpointres2=Model2D2(newpointX) - -# add the best candidate to experimental design - Xadapted=np.append(Xadapted,newpointX,axis=0) - Yadapted1=np.r_[Yadapted1, newpointres1] - Yadapted2=np.r_[Yadapted2, newpointres2] - - - -# reconstruct the PCE 1 - pce1.fit(Xadapted, Yadapted1) - pceLAR1=polynomial_chaos.regressions.LeastAngleRegression.model_selection(pce1) - - -# reconstruct the PCE 2 - pce2.fit(Xadapted, Yadapted2) - pceLAR2=polynomial_chaos.regressions.LeastAngleRegression.model_selection(pce2) - - - if i%10==0: - print('\nNumber of added simulations:', i) - - - -# plot final ED -fig, ax_nstd = plt.subplots(figsize=(6, 6)) -ax_nstd.plot(Xadapted[:,0],Xadapted[:,1], 'ro',label='Adapted ED') -ax_nstd.plot(xx_train[:,0],xx_train[:,1], 'bo',label='Original ED') -ax_nstd.set_ylabel(r'$X_2$') -ax_nstd.set_xlabel(r'$X_1$') -ax_nstd.legend(loc='upper left'); - - -# For a comparison, we construct also a surrogate model of the full original mathematical model and further run active learning similarly as for the previous reduced models. Note that the final ED for this complete mathematical model should be almost identical as in the previous case with the two PCEs approximating reduced models. 
- -# In[13]: - - -yy_train3 = Model2DComplete(xx_train) -pce3 = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=least_squares) -pce3.fit(xx_train, yy_train3) -pceLAR3=polynomial_chaos.regressions.LeastAngleRegression.model_selection(pce3) - - -# In[ ]: - - -Xadapted3=xx_train -Yadapted3=yy_train3 - - -# adaptive algorithm and reconstruction of PCE -for i in range (0,int(400)): -# create ThetaCriterion class for active learning - ThetaSampling_complete=ThetaCriterionPCE([pceLAR3]) - -# find the best candidate according to given criterium (variance and distance) - pos=ThetaSampling_complete.run(Xadapted3,Xaptive) - newpointX=np.array([Xaptive[pos,:]]) - newpointres=Model2DComplete(newpointX) - - -# add the best candidate to experimental design - Xadapted3=np.append(Xadapted3,newpointX,axis=0) - Yadapted3=np.r_[Yadapted3, newpointres] - - - pce3.fit(Xadapted3, Yadapted3) - pceLAR3=polynomial_chaos.regressions.LeastAngleRegression.model_selection(pce3) - - - if i%10==0: - print('\nNumber of added simulations:', i) - - - -# plot final ED -fig, ax_nstd = plt.subplots(figsize=(6, 6)) -ax_nstd.plot(Xadapted3[:,0],Xadapted3[:,1], 'ro',label='Adapted ED') -ax_nstd.plot(xx_train[:,0],xx_train[:,1], 'bo',label='Original ED') -ax_nstd.set_ylabel(r'$X_2$') -ax_nstd.set_xlabel(r'$X_1$') -ax_nstd.legend(loc='upper left'); - - -# In[ ]: - - - - - -# In[ ]: - - - - - -# In[ ]: - - - - diff --git a/docs/source/conf.py b/docs/source/conf.py index c4230d73b..4d44183fb 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -84,6 +84,7 @@ "../code/sampling/mcmc", "../code/sampling/tempering", "../code/sampling/simplex", + "../code/sampling/theta_criterion", "../code/sampling/true_stratified_sampling", "../code/sampling/refined_stratified_sampling", "../code/inference/mle", @@ -125,6 +126,7 @@ "auto_examples/sampling/mcmc", "auto_examples/sampling/tempering", "auto_examples/sampling/simplex", + "auto_examples/sampling/theta_criterion", "auto_examples/sampling/true_stratified_sampling", "auto_examples/sampling/refined_stratified_sampling", "auto_examples/inference/mle", diff --git a/docs/source/sampling/index.rst b/docs/source/sampling/index.rst index 6dc5fa454..fac6a95b2 100644 --- a/docs/source/sampling/index.rst +++ b/docs/source/sampling/index.rst @@ -17,6 +17,8 @@ The module currently contains the following classes: - :class:`.AdaptiveKriging`: Class generating samples adaptively using a specified Kriging-based learning function in a general Adaptive Kriging-Monte Carlo Sampling (AKMCS) framework +- :class:`.ThetaCriterionPCE`: Active learning for polynomial chaos expansion using Theta criterion balancing between exploration and exploitation. 
+
 - :class:`.MCMC`: The goal of Markov Chain Monte Carlo is to draw samples from some probability distribution which is hard to compute
 
 - :class:`.ImportanceSampling`: Importance sampling (IS) is based on the idea of sampling from an alternate distribution and reweighing the samples to be representative of the target distribution
@@ -31,6 +33,7 @@ The module currently contains the following classes:
     Refined Stratified Sampling
     Simplex Sampling
     Adaptive Kriging
+    Theta Criterion
     Markov Chain Monte Carlo
     Importance Sampling
 
diff --git a/docs/source/sampling/theta_criterion.rst b/docs/source/sampling/theta_criterion.rst
new file mode 100644
index 000000000..0e24e7b88
--- /dev/null
+++ b/docs/source/sampling/theta_criterion.rst
@@ -0,0 +1,24 @@
+Theta Criterion
+---------------
+
+
+ThetaCriterionPCE Class
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The :class:`.ThetaCriterionPCE` class is imported using the following command:
+
+>>> from UQpy.sampling.ThetaCriterionPCE import ThetaCriterionPCE
+
+
+Methods
+"""""""""""
+.. autoclass:: UQpy.sampling.ThetaCriterionPCE
+    :members: run
+
+
+Examples
+"""""""""""
+
+.. toctree::
+
+   Theta Criterion Examples <../auto_examples/sampling/theta_criterion/index>
diff --git a/src/UQpy/sampling/ThetaCriterionPCE.py b/src/UQpy/sampling/ThetaCriterionPCE.py
index f91dda468..5a71fbd6f 100644
--- a/src/UQpy/sampling/ThetaCriterionPCE.py
+++ b/src/UQpy/sampling/ThetaCriterionPCE.py
@@ -10,7 +10,7 @@ class ThetaCriterionPCE:
     def __init__(self, surrogates: list[UQpy.surrogates.polynomial_chaos.PolynomialChaosExpansion]):
         """
         Active learning for polynomial chaos expansion using Theta criterion balancing between exploration and
-        exploitation. 
+        exploitation.
 
         :param surrogates: list of objects of the :py:meth:`UQpy` :class:`PolynomialChaosExpansion` class
         """
@@ -22,6 +22,7 @@ def run(self, existing_samples: np.ndarray, candidate_samples: np.ndarray, nsamp
         """
         Execute the :class:`.ThetaCriterionPCE` active learning.
 
+        :param existing_samples: Samples in existing ED used for construction of PCEs.
         :param candidate_samples: Candidate samples for selecting by Theta criterion.
         :param samples_weights: Weights associated to X samples (e.g. from Coherence Sampling).
@@ -43,7 +44,6 @@ def run(self, existing_samples: np.ndarray, candidate_samples: np.ndarray, nsamp
         npce = len(pces)
         nsimexisting, nvar = existing_samples.shape
         nsimcandidate, nvar = candidate_samples.shape
-        l = np.zeros(nsimcandidate)
         criterium = np.zeros(nsimcandidate)
         if samples_weights is None:
             samples_weights = np.ones(nsimexisting)
@@ -56,31 +56,29 @@ def run(self, existing_samples: np.ndarray, candidate_samples: np.ndarray, nsamp
         pos = []
 
-        for n in range(nsamples):
+        for _ in range(nsamples):
             S = polynomial_chaos.Polynomials.standardize_sample(existing_samples,
                                                                 pces[0].polynomial_basis.distributions)
-            Scandidate = polynomial_chaos.Polynomials.standardize_sample(candidate_samples,
+            s_candidate = polynomial_chaos.Polynomials.standardize_sample(candidate_samples,
                                                                           pces[0].polynomial_basis.distributions)
-            lengths = cdist(Scandidate, S)
-            closestS_pos = np.argmin(lengths, axis=1)
-            closest_valueX = existing_samples[closestS_pos]
+            lengths = cdist(s_candidate, S)
+            closest_s_position = np.argmin(lengths, axis=1)
+            closest_value_x = existing_samples[closest_s_position]
             l = np.nanmin(lengths, axis=1)
 
             variance_candidate = 0
             variance_closest = 0
 
             for i in range(npce):
-                variance_candidatei = 0
-                variance_closesti = 0
                 pce = pces[i]
                 variance_candidatei = self._local_variance(candidate_samples, pce, candidate_weights)
-                variance_closesti = self._local_variance(closest_valueX, pce, samples_weights[closestS_pos])
+                variance_closesti = self._local_variance(closest_value_x, pce, samples_weights[closest_s_position])
                 variance_candidate = variance_candidate + variance_candidatei * pce_weights[i]
                 variance_closest = variance_closest + variance_closesti * pce_weights[i]
 
-            criteriumV = np.sqrt(variance_candidate * variance_closest)
-            criteriumL = l ** nvar
-            criterium = criteriumV * criteriumL
+            criterium_v = np.sqrt(variance_candidate * variance_closest)
+            criterium_l = l ** nvar
+            criterium = criterium_v * criterium_l
             pos.append(np.argmax(criterium))
             existing_samples = np.append(existing_samples, candidate_samples[pos, :], axis=0)
             samples_weights = np.append(samples_weights, candidate_weights[pos])
 
@@ -90,7 +88,7 @@ def run(self, existing_samples: np.ndarray, candidate_samples: np.ndarray, nsamp
             pos = pos[0]
             return pos
         else:
-            return variance_candidate, criteriumV, criteriumL, criterium
+            return variance_candidate, criterium_v, criterium_l, criterium
 
     # calculate variance density of PCE for Theta Criterion
     @staticmethod
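
For quick reference, a minimal usage sketch of the refactored class follows. It is distilled from the new example ``pce_theta_criterion.py`` added by this patch; the toy model, the polynomial degree, the candidate count, and the names ``model``, ``x_train`` and ``candidates`` are illustrative assumptions and not part of the UQpy API.

import numpy as np

from UQpy.distributions import Uniform, JointIndependent
from UQpy.surrogates import (PolynomialChaosExpansion, TotalDegreeBasis,
                             LeastSquareRegression, polynomial_chaos)
from UQpy.sampling.ThetaCriterionPCE import ThetaCriterionPCE


def model(x):
    # smooth toy function of two inputs (illustrative only)
    return np.sin(4 * x[:, 0]) * np.cos(4 * x[:, 1])


# small initial experimental design on the unit square
joint = JointIndependent(marginals=[Uniform(loc=0, scale=1), Uniform(loc=0, scale=1)])
x_train = joint.rvs(15)
y_train = model(x_train)

# full total-degree PCE fitted by least squares, then reduced by LAR model selection
pce = PolynomialChaosExpansion(polynomial_basis=TotalDegreeBasis(joint, 5),
                               regression_method=LeastSquareRegression())
pce.fit(x_train, y_train)
pce_lar = polynomial_chaos.regressions.LeastAngleRegression.model_selection(pce)

# one active-learning step: pick the best of 1000 random candidates by the Theta criterion
candidates = joint.rvs(1000)
best = ThetaCriterionPCE([pce_lar]).run(x_train, candidates)

# extend the experimental design and refit the surrogate before the next iteration
x_train = np.append(x_train, candidates[best, :].reshape(1, -1), axis=0)
y_train = np.append(y_train, model(x_train[-1].reshape(1, -1)))
pce.fit(x_train, y_train)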