diff --git a/docs/code/surrogates/pce/plot_pce_euler_UQ.py b/docs/code/surrogates/pce/plot_pce_euler_UQ.py
index 5913dfbb6..b78dceb75 100644
--- a/docs/code/surrogates/pce/plot_pce_euler_UQ.py
+++ b/docs/code/surrogates/pce/plot_pce_euler_UQ.py
@@ -17,12 +17,11 @@
 # load PC^2
 from UQpy.surrogates.polynomial_chaos.polynomials.TotalDegreeBasis import TotalDegreeBasis
-from UQpy.surrogates.polynomial_chaos.physics_informed.ConstrainedPCE import ConstrainedPce
+from UQpy.surrogates.polynomial_chaos.physics_informed.ConstrainedPCE import ConstrainedPCE
 from UQpy.surrogates.polynomial_chaos.physics_informed.PdeData import PdeData
-from UQpy.surrogates.polynomial_chaos.physics_informed.PdePCE import PdePce
+from UQpy.surrogates.polynomial_chaos.physics_informed.PdePCE import PdePCE
 from UQpy.surrogates.polynomial_chaos.physics_informed.Utilities import *
-from UQpy.surrogates.polynomial_chaos.physics_informed.ReducedPCE import ReducedPce
-
+from UQpy.surrogates.polynomial_chaos.physics_informed.ReducedPCE import ReducedPCE
 
 # %% md
 
@@ -32,25 +31,28 @@
 
 # %%
 
 # Definition of PDE/ODE
-def pde_func(S, pce):
+def pde_func(standardized_sample, pce):
     der_order = 4
-    deriv_0_PCE = derivative_basis(S, pce, der_order=der_order, variable=0) * ((2 / 1) ** der_order)
+    deriv_pce = derivative_basis(standardized_sample, pce, derivative_order=der_order, leading_variable=0) * ((2 / 1) ** der_order)
 
-    pde_basis = deriv_0_PCE
+    pde_basis = deriv_pce
 
     return pde_basis
 
+
 # Definition of the source term
-def pde_res(S):
-    load_s = Load(S)
+def pde_res(standardized_sample):
+    load_s = load(standardized_sample)
     return -load_s[:, 0]
 
-def Load(S):
-    return Const_Load(S)
 
-def Const_Load(S):
-    l = (1+(1+S[:, 1])/2).reshape(-1,1)
+def load(standardized_sample):
+    return const_load(standardized_sample)
+
+
+def const_load(standardized_sample):
+    l = (1 + (1 + standardized_sample[:, 1]) / 2).reshape(-1, 1)
     return l
 
@@ -60,7 +62,7 @@ def bc_sampling(nsim=1000):
 
     nsim_half = round(nsim / 2)
     sample = np.zeros((nsim, 2))
-    real_ogrid_1d = ortho_grid(nsim_half, 1, 0, 1)[:, 0]
+    real_ogrid_1d = ortho_grid(nsim_half, 1, 0.0, 1.0)[:, 0]
 
     sample[:nsim_half, 0] = np.zeros(nsim_half)
     sample[:nsim_half, 1] = real_ogrid_1d
@@ -70,34 +72,35 @@ def bc_sampling(nsim=1000):
 
     return sample
 
+
 # define sampling and evaluation of BC for estimation of error
 def bc_res(nsim, pce):
-    bc_x = np.zeros((2, 2))
-    bc_x[1, 0] = 1
-    bc_s = polynomial_chaos.Polynomials.standardize_sample(bc_x, pce.polynomial_basis.distributions)
+    physical_sample = np.zeros((2, 2))
+    physical_sample[1, 0] = 1
+    standardized_sample = polynomial_chaos.Polynomials.standardize_sample(physical_sample, pce.polynomial_basis.distributions)
 
     der_order = 2
-    deriv_0_PCE = np.sum(
-        derivative_basis(bc_s, pce, der_order=der_order, variable=0) * ((2 / 1) ** der_order) * np.array(
-            pce.coefficients).T, axis=1)
+    deriv_pce = np.sum(
+        derivative_basis(standardized_sample, pce, derivative_order=der_order, leading_variable=0) *
+        ((2 / 1) ** der_order) * np.array(pce.coefficients).T, axis=1)
 
-    return deriv_0_PCE
+    return deriv_pce
 
 
 # Definition of the reference solution for an error estimation
-def ref_sol(x, q):
-    return (q+1)*(-(x ** 4) / 24 + x ** 3 / 12 - x / 24)
+def ref_sol(physical_coordinate, q):
+    return (q + 1) * (-(physical_coordinate ** 4) / 24 + physical_coordinate ** 3 / 12 - physical_coordinate / 24)
 
 # %% md
 #
 # Beam equation is parametrized by one deterministic input variable (coordinate :math:`x`) and
-# random load intesity :math:`q`, both modeled as Uniform random variables.
+# random load intensity :math:`q`, both modeled as Uniform random variables.
 
 # number of input variables
-nrand=1
-nvar = 1+nrand
+nrand = 1
+nvar = 1 + nrand
 
 # definition of a joint probability distribution
 dist1 = Uniform(loc=0, scale=1)
@@ -105,7 +108,6 @@ def ref_sol(x, q):
 marg = [dist1, dist2]
 joint = JointIndependent(marginals=marg)
 
-
 # %% md
 #
 # The physical domain is defined by :math:`x \in [0,1]`
@@ -145,7 +147,7 @@ def ref_sol(x, q):
 #
 # Further we construct an object containing PDE physical data and PC :math:`^2` definitions of PDE
 
-pde_pce = PdePce(pde_data, pde_func, pde_res=pde_res, bc_res=bc_res)
+pde_pce = PdePCE(pde_data, pde_func, pde_res=pde_res, bc_res=bc_res)
 
 # %% md
 #
@@ -168,36 +170,35 @@ def ref_sol(x, q):
 
 # create initial PCE object containing basis, regression method and dirichlet BC
 initpce = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=least_squares)
-initpce.set_ed(x_train, y_train)
+initpce.set_data(x_train, y_train)
 
 # construct a PC^2 object combining pde_data, pde_pce and initial PCE objects
-pcpc = ConstrainedPce(pde_data, pde_pce, initpce)
+pcpc = ConstrainedPCE(pde_data, pde_pce, initpce)
 
 # get coefficients of PC^2 by least angle regression
 pcpc.lar()
 
 # get coefficients of PC^2 by ordinary least squares
 pcpc.ols()
-
 # evaluate errors of approximations
-real_ogrid = ortho_grid(100, nvar, 0, 1)
+real_ogrid = ortho_grid(100, nvar, 0.0, 1.0)
 
 yy_val_pce = pcpc.lar_pce.predict(real_ogrid).flatten()
 yy_val_pce_ols = pcpc.initial_pce.predict(real_ogrid).flatten()
-yy_val_true = ref_sol(real_ogrid[:,0],real_ogrid[:,1]).flatten()
+yy_val_true = ref_sol(real_ogrid[:, 0], real_ogrid[:, 1]).flatten()
 
 err = np.abs(yy_val_pce - yy_val_true)
 tot_err = np.sum(err)
-print('\nTotal approximation error by PC^2-LAR: ',tot_err)
+print('\nTotal approximation error by PC^2-LAR: ', tot_err)
 
 err_ols = np.abs(yy_val_pce_ols - yy_val_true)
 tot_err_ols = np.sum(err_ols)
-print('Total approximation error by PC^2-OLS: ',tot_err_ols)
+print('Total approximation error by PC^2-OLS: ', tot_err_ols)
 
 # %% md
 #
-# Once the PC :math:`^2` is constructed, we use ReducedPce class to filter out influence of deterministic variable in UQ
+# Once the PC :math:`^2` is constructed, we use the ReducedPCE class to filter out the influence of the deterministic variable in UQ
 
-reduced_pce=ReducedPce(pcpc.lar_pce, n_det=1)
+reduced_pce = ReducedPCE(pcpc.lar_pce, n_deterministic=1)
 
 # %% md
 #
@@ -221,9 +222,11 @@ def ref_sol(x, q):
     uq = np.zeros((1 + n_derivations, nrand))
     for d in range(1 + n_derivations):
         if d == 0:
-            coeff = (reduced_pce.eval_coord(x, return_coeff=True))
+            coeff = (reduced_pce.evaluate_coordinate(np.array(x), return_coefficients=True))
         else:
-            coeff = reduced_pce.derive_coord(x, der_order=d, der_var=0, return_coeff=True)
+            coeff = reduced_pce.derive_coordinate(np.array(x), derivative_order=d, leading_variable=0,
+                                                  return_coefficients=True,
+                                                  derivative_multiplier=transformation_multiplier(pde_data, 0, d))
         mean[d] = coeff[0]
         var[d] = np.sum(coeff[1:] ** 2)
         variances[d, :] = reduced_pce.variance_contributions(coeff)
@@ -251,24 +254,20 @@ def ref_sol(x, q):
 
 # plot results
 import matplotlib.pyplot as plt
-
-
 fig, ax = plt.subplots(1, 4, figsize=(14.5, 3))
 colors = ['black', 'blue', 'green', 'red']
 
 for d in range(2, 1 + n_derivations):
+    ax[d - 1].plot(beam_x, mean_res[:, d], color=colors[d - 1])
+    ax[d - 1].plot(beam_x, mean_res[:, d] + sigma_mult * np.sqrt(vartot_res[:, d]), color=colors[d - 1])
+    ax[d - 1].plot(beam_x, mean_res[:, d] - sigma_mult * np.sqrt(vartot_res[:, d]), color=colors[d - 1])
 
-    ax[d-1].plot(beam_x, mean_res[:, d], color=colors[d-1])
-    ax[d-1].plot(beam_x, mean_res[:, d] + sigma_mult * np.sqrt(vartot_res[:, d]), color=colors[d-1])
-    ax[d-1].plot(beam_x, mean_res[:, d] - sigma_mult * np.sqrt(vartot_res[:, d]), color=colors[d-1])
 
+    ax[d - 1].plot(beam_x, lower_quantiles_modes[:, d, 0], '--', alpha=0.7, color=colors[d - 1])
+    ax[d - 1].plot(beam_x, upper_quantiles_modes[:, d, 0], '--', alpha=0.7, color=colors[d - 1])
+    ax[d - 1].fill_between(beam_x, lower_quantiles_modes[:, d, 0], upper_quantiles_modes[:, d, 0],
+                           facecolor=colors[d - 1], alpha=0.05)
 
-
-    ax[d-1].plot(beam_x, lower_quantiles_modes[:, d, 0], '--', alpha=0.7, color=colors[d-1])
-    ax[d-1].plot(beam_x, upper_quantiles_modes[:, d, 0], '--', alpha=0.7, color=colors[d-1])
-    ax[d-1].fill_between(beam_x, lower_quantiles_modes[:, d, 0], upper_quantiles_modes[:, d, 0],
-                         facecolor=colors[d-1], alpha=0.05)
-
-    ax[d-1].plot(beam_x, np.zeros(len(beam_x)), color='black')
+    ax[d - 1].plot(beam_x, np.zeros(len(beam_x)), color='black')
 
 ax[0].plot(beam_x, mean_res[:, 0], color=colors[0])
 ax[0].plot(beam_x, mean_res[:, 0] + sigma_mult * np.sqrt(vartot_res[:, 0]), color=colors[0])
@@ -277,7 +276,7 @@ def ref_sol(x, q):
 ax[0].plot(beam_x, lower_quantiles_modes[:, 0, 0], '--', alpha=0.7, color=colors[0])
 ax[0].plot(beam_x, upper_quantiles_modes[:, 0, 0], '--', alpha=0.7, color=colors[0])
 ax[0].fill_between(beam_x, lower_quantiles_modes[:, 0, 0], upper_quantiles_modes[:, 0, 0],
-                   facecolor=colors[0], alpha=0.05)
+                    facecolor=colors[0], alpha=0.05)
 
 ax[0].plot(beam_x, np.zeros(len(beam_x)), color='black')
 
@@ -292,4 +291,3 @@ def ref_sol(x, q):
 for axi in ax.flatten():
     axi.set_xlabel(r'$x$')
 plt.show()
-
diff --git a/docs/code/surrogates/pce/plot_pce_wave.py b/docs/code/surrogates/pce/plot_pce_wave.py
index deb73fa5e..e25a6ca2a 100644
--- a/docs/code/surrogates/pce/plot_pce_wave.py
+++ b/docs/code/surrogates/pce/plot_pce_wave.py
@@ -19,11 +19,12 @@
 from UQpy.sampling import LatinHypercubeSampling
 
 # load PC^2
-from UQpy.surrogates.polynomial_chaos.physics_informed.ConstrainedPCE import ConstrainedPce
+from UQpy.surrogates.polynomial_chaos.physics_informed.ConstrainedPCE import ConstrainedPCE
 from UQpy.surrogates.polynomial_chaos.physics_informed.PdeData import PdeData
-from UQpy.surrogates.polynomial_chaos.physics_informed.PdePCE import PdePce
+from UQpy.surrogates.polynomial_chaos.physics_informed.PdePCE import PdePCE
 from UQpy.surrogates.polynomial_chaos.physics_informed.Utilities import *
 
+
 # %% md
 #
 # We then define functions used for construction of Physically Constrained PCE (PC :math:`^2`)
@@ -36,12 +37,12 @@ def pde_func(s, pce):
 
     # partial derivation according to the first input variable x (second order)
     # derived basis must be multiplied by constant reflecting the difference between physical and standardized space
-    deriv_0_pce = derivative_basis(s, pce, der_order=2, variable=0) * transformation_multiplier(
-        pde_data, var=0,
-        derivation_order=2)
-    deriv_1_pce = derivative_basis(s, pce, der_order=2, variable=1) * transformation_multiplier(pde_data,
-                                                                                                var=1,
-                                                                                                derivation_order=2)
+    deriv_0_pce = derivative_basis(s, pce, derivative_order=2, leading_variable=0) * transformation_multiplier(pde_data,
+                                                                                                               leading_variable=0,
+                                                                                                               derivation_order=2)
+    deriv_1_pce = derivative_basis(s, pce, derivative_order=2, leading_variable=1) * transformation_multiplier(pde_data,
+                                                                                                               leading_variable=1,
+                                                                                                               derivation_order=2)
 
     pde_basis = deriv_1_pce - 4 * deriv_0_pce
 
     return pde_basis
@@ -58,7 +59,7 @@ def bc_sampling(nsim=1000):
 
     nsim_half = round(nsim / 2)
     sample = np.zeros((nsim, 2))
-    real_ogrid_1d = ortho_grid(nsim_half, 1, 0, 2)[:, 0]
+    real_ogrid_1d = ortho_grid(nsim_half, 1, 0.0, 2.0)[:, 0]
 
     sample[:nsim_half, 0] = np.zeros(int(nsim / 2))
     sample[:nsim_half, 1] = real_ogrid_1d
@@ -77,7 +78,7 @@ def bc_res(nsim, pce):
 
     der_order = 0
     deriv_0_pce = np.sum(
-        derivative_basis(bc_s, pce, der_order=der_order, variable=0) * ((2 / 1) ** der_order) * np.array(
+        derivative_basis(bc_s, pce, derivative_order=der_order, leading_variable=0) * ((2 / 1) ** der_order) * np.array(
             pce.coefficients).T, axis=1)
 
     bc_init_x, bc_init_y = init_sampling(nsim)
@@ -85,12 +86,14 @@ def bc_res(nsim, pce):
 
     der_order = 0
     deriv_0_init = np.sum(
-        derivative_basis(bc_init_s, pce, der_order=der_order, variable=0) * ((2 / 1) ** der_order) * np.array(
+        derivative_basis(bc_init_s, pce, derivative_order=der_order, leading_variable=0) * (
+                (2 / 1) ** der_order) * np.array(
             pce.coefficients).T, axis=1)
 
     der_order = 1
     deriv_1_init = np.sum(
-        derivative_basis(bc_init_s, pce, der_order=der_order, variable=1) * ((2 / 1) ** der_order) * np.array(
+        derivative_basis(bc_init_s, pce, derivative_order=der_order, leading_variable=1) * (
+                (2 / 1) ** der_order) * np.array(
             pce.coefficients).T, axis=1)
 
     return deriv_0_pce + np.abs(deriv_0_init - bc_init_y[:, 0]) + deriv_1_init
@@ -100,7 +103,7 @@ def bc_res(nsim, pce):
 def init_sampling(nsim=1000):
     init_x = joint.rvs(nsim)
-    init_x[:, 0] = ortho_grid(nsim, 1, 0, 1)[:, 0]
+    init_x[:, 0] = ortho_grid(nsim, 1, 0.0, 1.0)[:, 0]
 
     # initial conditions are defined for t=0
     init_x[:, 1] = np.zeros(nsim)
@@ -181,8 +184,7 @@ def ref_sol(x):
 
 # Further we construct an object containing PDE physical data and PC :math:`^2` definitions of PDE
 
-
-pde_pce = PdePce(pde_data, pde_func, pde_res=pde_res, virt_func=virt_sampling, bc_func=bc_sampling, bc_res=bc_res)
+pde_pce = PdePCE(pde_data, pde_func, pde_res=pde_res, virt_func=virt_sampling, bc_func=bc_sampling, bc_res=bc_res)
 
 # %% md
 #
@@ -209,10 +211,10 @@ def ref_sol(x):
 
     # create initial PCE object containing basis, regression method and dirichlet BC
     initpce = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=least_squares)
-    initpce.set_ed(x_train, y_train)
+    initpce.set_data(x_train, y_train)
 
     # construct a PC^2 object combining pde_data, pde_pce and initial PCE objects
-    pcpc = ConstrainedPce(pde_data, pde_pce, initpce)
+    pcpc = ConstrainedPCE(pde_data, pde_pce, initpce)
 
     # get coefficients of PC^2 by KKT-OLS
     pcpc.ols(n_PI=10000)
@@ -230,10 +232,10 @@ def ref_sol(x):
 
 # create initial PCE object containing basis, regression method and dirichlet BC
 initpce = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=least_squares)
-initpce.set_ed(x_train, y_train)
+initpce.set_data(x_train, y_train)
 
 # construct a PC^2 object combining pde_data, pde_pce and initial PCE objects
-pcpc = ConstrainedPce(pde_data, pde_pce, initpce)
+pcpc = ConstrainedPCE(pde_data, pde_pce, initpce)
 
 # get coefficients of PC^2 by KKT-OLS
 pcpc.ols(n_PI=10000)
@@ -248,7 +250,7 @@ def ref_sol(x):
 
 # plot the results and calculate absolute error
-real_ogrid = ortho_grid(200, nvar, 0, 2)
+real_ogrid = ortho_grid(200, nvar, 0.0, 2.0)
 real_ogrid[:, 0] = real_ogrid[:, 0] / 2
 
 yy_val_pce = pcpc.initial_pce.predict(real_ogrid).flatten()
@@ -256,9 +258,10 @@ def ref_sol(x):
 abs_err = np.abs(yy_val_true - yy_val_pce)
 
 print('\nComparison to the reference solution:')
-print('Mean squared error of PC^2: ', np.mean(abs_err**2))
+print('Mean squared error of PC^2: ', np.mean(abs_err ** 2))
 
 import matplotlib.pyplot as plt
+
 vmin = yy_val_pce.min()
 vmax = yy_val_pce.max()
 norm = plt.Normalize(vmin=vmin, vmax=vmax)
@@ -288,4 +291,4 @@ def ref_sol(x):
 ax[1].set_title('Absolute error')
 ax[1].set_xlabel('coordinate $x$')
 fig.colorbar(plt.cm.ScalarMappable(norm=norm_err, cmap=cmap_err), ax=ax[1])
-plt.show()
\ No newline at end of file
+plt.show()
diff --git a/docs/source/surrogates/pce/physics_informed.rst b/docs/source/surrogates/pce/physics_informed.rst
index 29602625b..14465c3ac 100644
--- a/docs/source/surrogates/pce/physics_informed.rst
+++ b/docs/source/surrogates/pce/physics_informed.rst
@@ -21,11 +21,11 @@ governing differential equation. It is imported using the following command:
-PdePce class
+PdePCE class
 """""""""""""""""""""""""""""""""""
-The second class in the PC :math:`^2` framework is :class:`.PdePce` containing PDE physical data and definitions of PDE
+The second class in the PC :math:`^2` framework is :class:`.PdePCE` containing PDE physical data and definitions of PDE
 in PCE context. The class is imported using the following command:
 
->>> from UQpy.surrogates.polynomial_chaos.physics_informed.PdePCE import PdePce
+>>> from UQpy.surrogates.polynomial_chaos.physics_informed.PdePCE import PdePCE
 
-.. autoclass:: UQpy.surrogates.polynomial_chaos.physics_informed.PdePCE.PdePce
+.. autoclass:: UQpy.surrogates.polynomial_chaos.physics_informed.PdePCE.PdePCE
 
@@ -37,7 +37,7 @@ ConstrainedPCE class
-Finally, a numerical solvers based on Karush-Kuhn-Tucker normal equations are defined in the :class:`.ConstrainedPCE`
+Finally, numerical solvers based on Karush-Kuhn-Tucker normal equations are defined in the :class:`.ConstrainedPCE`
 imported using the following command:
 
->>> from UQpy.surrogates.polynomial_chaos.physics_informed.ConstrainedPCE import ConstrainedPce
+>>> from UQpy.surrogates.polynomial_chaos.physics_informed.ConstrainedPCE import ConstrainedPCE
 
-.. autoclass:: UQpy.surrogates.polynomial_chaos.physics_informed.ConstrainedPCE.ConstrainedPce
+.. autoclass:: UQpy.surrogates.polynomial_chaos.physics_informed.ConstrainedPCE.ConstrainedPCE
 
@@ -62,7 +62,7 @@
 Their influence can be filtered out by the :class:`.ReducedPCE` class.
 
 The :class:`.ReducedPCE` class is imported using the following command:
 
->>> from UQpy.surrogates.polynomial_chaos.physics_informed.ReducedPCE import ReducedPce
+>>> from UQpy.surrogates.polynomial_chaos.physics_informed.ReducedPCE import ReducedPCE
 
-.. autoclass:: UQpy.surrogates.polynomial_chaos.physics_informed.ReducedPCE.ReducedPce
+.. autoclass:: UQpy.surrogates.polynomial_chaos.physics_informed.ReducedPCE.ReducedPCE
     :members:
diff --git a/src/UQpy/surrogates/polynomial_chaos/PolynomialChaosExpansion.py b/src/UQpy/surrogates/polynomial_chaos/PolynomialChaosExpansion.py
index 61eb324de..c61d6d627 100644
--- a/src/UQpy/surrogates/polynomial_chaos/PolynomialChaosExpansion.py
+++ b/src/UQpy/surrogates/polynomial_chaos/PolynomialChaosExpansion.py
@@ -52,7 +52,15 @@ def polynomials_number(self):
     def inputs_number(self):
         return self.polynomial_basis.inputs_number
 
-    def set_ed(self, x: np.ndarray, y: np.ndarray):
+    def set_data(self, x: np.ndarray, y: np.ndarray):
+        """
+        Set the experimental design of the surrogate model.
+
+        :param x: :class:`numpy.ndarray` containing the training points.
+        :param y: :class:`numpy.ndarray` containing the model evaluations at the training points.
+
+        The :meth:`set_data` method has no returns and it creates an :class:`numpy.ndarray` with the design matrix.
+        """
         self.experimental_design_input = x
         self.experimental_design_output = y
         self.design_matrix = self.polynomial_basis.evaluate_basis(x)
@@ -68,7 +76,7 @@ def fit(self, x: np.ndarray, y: np.ndarray):
 
         The :meth:`fit` method has no returns and it creates an :class:`numpy.ndarray` with the polynomial_chaos coefficients.
""" - self.set_ed(x,y) + self.set_data(x,y) self.logger.info("UQpy: Running polynomial_chaos.fit") self.coefficients, self.bias, self.outputs_number = self.regression_method.run(x, y, self.design_matrix) self.logger.info("UQpy: polynomial_chaos fit complete.") diff --git a/src/UQpy/surrogates/polynomial_chaos/physics_informed/ConstrainedPCE.py b/src/UQpy/surrogates/polynomial_chaos/physics_informed/ConstrainedPCE.py index 8e08ad4b5..3e4dd27dd 100644 --- a/src/UQpy/surrogates/polynomial_chaos/physics_informed/ConstrainedPCE.py +++ b/src/UQpy/surrogates/polynomial_chaos/physics_informed/ConstrainedPCE.py @@ -1,16 +1,19 @@ import numpy as np -from scipy import special as sp -from scipy.special import legendre + from sklearn import linear_model as regresion from UQpy.surrogates import * -from UQpy.distributions.collection import Uniform, Normal import UQpy.surrogates.polynomial_chaos.physics_informed.Utilities as utils import copy +from UQpy.surrogates.polynomial_chaos.physics_informed.PdePCE import PdePCE +from UQpy.surrogates.polynomial_chaos.physics_informed.PdeData import PdeData from beartype import beartype -class ConstrainedPce: +import logging + + +class ConstrainedPCE: @beartype - def __init__(self, pde_data, pde_pce, pce): + def __init__(self, pde_data: PdeData, pde_pce: PdePCE, pce: PolynomialChaosExpansion): """ Class for construction of physics-informed PCE using Karush-Kuhn-Tucker normal equations @@ -30,24 +33,36 @@ def __init__(self, pde_data, pde_pce, pce): self.y_extended = None self.kkt = None - def estimate_error(self, pce, verif_S): + @beartype + def estimate_error(self, pce: PolynomialChaosExpansion, standardized_sample: np.ndarray): + """ + Estimate an error of the physics-informed PCE consisting of three parts: prediction errors in training data, + errors in boundary conditions and violations of given PDE. Total error is sum of individual mean squared errors. 
+
+        :param pce: initial pce, an object of the :py:meth:`UQpy` :class:`PolynomialChaosExpansion` class
+        :param standardized_sample: virtual samples in standardized space for evaluation of errors in PDE
+
+        :return: estimated total mean squared error
+        """
         ypce = pce.predict(pce.experimental_design_input)
         err_data = (np.sum((pce.experimental_design_output - ypce) ** 2) / len(ypce))
 
         err_pde = np.abs(
-            self.pde_pce.pde_eval(verif_S, pce, coefficients=pce.coefficients) - self.pde_pce.pderes_eval(verif_S,
-                                                                                                          multindex=pce.multi_index_set,
-                                                                                                          coefficients=pce.coefficients))
+            self.pde_pce.pde_eval(standardized_sample, pce, coefficients=pce.coefficients) - self.pde_pce.pderes_eval(
+                standardized_sample,
+                multindex=pce.multi_index_set,
+                coefficients=pce.coefficients))
         err_pde = np.mean(err_pde ** 2)
 
-        err_bc = self.pde_pce.bc_eval(len(verif_S), pce)
+        err_bc = self.pde_pce.bc_eval(len(standardized_sample), pce)
         err_bc = np.mean(err_bc ** 2)
 
         err_complete = (err_data + err_pde + err_bc)
 
         return err_complete
 
-    def lar(self, n_PI=50, virtual_niters=False, max_niter=None, no_iter=False, minsize_basis=1, nvirtual=None,
-            target_error=0):
+    @beartype
+    def lar(self, n_PI: int = 50, virtual_niters: bool = False, max_niter: int = None, no_iter: bool = False, minsize_basis: int = 1, nvirtual: int = -1,
+            target_error: float = 0):
         """
         Fit the sparse physics-informed PCE by Least Angle Regression from Karush-Kuhn-Tucker normal equations
 
         :param max_niter: maximum number of iterations for construction of LAR Path
         :param no_iter: use all obtained basis functions in the first step, i.e. no iterations
         :param minsize_basis: minimum number of basis functions for starting the iterative process
-        :param nvirtual: set number of virtual points
+        :param nvirtual: set number of virtual points, -1 corresponds to the optimal number
         :param target_error: target error of iterative process
         """
         self.ols(calc_coeff=False, nvirtual=nvirtual)
-
+        logger = logging.getLogger(__name__)
         pce = copy.deepcopy(self.initial_pce)
 
         if self.pde_pce.virt_func is None:
-            verif_S = utils.ortho_grid(n_PI, pce.inputs_number, -1, 1)
+            virtual_samples = utils.ortho_grid(n_PI, pce.inputs_number, -1.0, 1.0)
         else:
             virtual_x = self.pde_pce.virt_func(n_PI)
-            verif_S = polynomial_chaos.Polynomials.standardize_sample(virtual_x, pce.polynomial_basis.distributions)
+            virtual_samples = polynomial_chaos.Polynomials.standardize_sample(virtual_x, pce.polynomial_basis.distributions)
 
         if max_niter is None:
             max_niter = self.pde_data.nconst + 200
@@ -76,13 +91,14 @@
 
         steps = len(lar_path)
-        print('Obtained Steps: ', steps)
+        logger.info('Cardinality of the identified sparse basis set: {}'.format(int(steps)))
+
         multindex = self.initial_pce.multi_index_set
 
         if steps < 3:
             raise Exception('LAR identified constant function! Check your data.')
 
-        best_err = np.inf
+        best_error = np.inf
         lar_basis = []
         lar_index = []
         lar_error = []
@@ -93,10 +109,10 @@
         if minsize_basis > steps - 2 or no_iter == True:
             minsize_basis = steps - 3
 
-        print('\nStart of the iterative LAR algorithm:')
-        print('-------------------------------------')
+        logger.info('Start of the iterative LAR algorithm ({} steps)'.format(steps - 2 - minsize_basis))
+
         for i in range(minsize_basis, steps - 2):
-            print('\nStep No. ', i)
+
             mask = lar_path[:i]
             mask = np.concatenate([[0], mask])
 
             pce.polynomial_basis.polynomials_number = len(basis_step)
             pce.polynomial_basis.polynomials = basis_step
             pce.multi_index_set = multindex_step
-            pce.set_ed(pce.experimental_design_input, pce.experimental_design_output)
+            pce.set_data(pce.experimental_design_input, pce.experimental_design_output)
 
             pce.coefficients = self.ols(pce, nvirtual=nvirtual)
-            err = self.estimate_error(pce, verif_S)
-            print('Error: ', err)
+            err = self.estimate_error(pce, virtual_samples)
+
             lar_error.append(err)
 
-            if err < best_err:
-                best_err = err
+            if err < best_error:
+                best_error = err
                 best_basis = basis_step
                 best_index = multindex_step
 
-            if best_err < target_error:
+            if best_error < target_error:
                 break
 
-        print('End of the iterative LAR algorithm:')
-        print('-------------------------------------')
-
-        print('\nLowest error: ', best_err)
+        logger.info('End of the iterative LAR algorithm')
+        logger.info('Lowest obtained error {}'.format(best_error))
 
         if len(lar_error) > 1:
             pce.polynomial_basis.polynomials_number = len(best_basis)
             pce.polynomial_basis.polynomials = best_basis
             pce.multi_index_set = best_index
-            pce.set_ed(pce.experimental_design_input, pce.experimental_design_output)
+            pce.set_data(pce.experimental_design_input, pce.experimental_design_output)
 
             pce.coefficients = self.ols(pce, nvirtual=nvirtual)
-            err = self.estimate_error(pce, verif_S)
-            print('\nFinal PCE error: ', err)
+            err = self.estimate_error(pce, virtual_samples)
+            logger.info('Final PCE error {}'.format(err))
 
         self.lar_pce = pce
         self.lar_basis = best_basis
         self.lar_multindex = best_index
-        self.lar_error = best_err
+        self.lar_error = best_error
         self.lar_error_path = lar_error
 
-    def ols(self, pce=None, nvirtual=None, calc_coeff=True, return_coeff=True, n_PI=100):
+    @beartype
+    def ols(self, pce: PolynomialChaosExpansion = None, nvirtual: int = -1, calc_coeff: bool = True, return_coeff: bool = True, n_PI: int = 100):
         """
         Fit the sparse physics-informed PCE by ordinary least squares from Karush-Kuhn-Tucker normal equations
 
-        :param pce: an object of the :py:meth:`UQpy` :class:`PolynomialChaosExpansion` class
-        :param nvirtual: set number of virtual points
+        :param pce: an object of the :class:`PolynomialChaosExpansion` class
+        :param nvirtual: set number of virtual points, -1 corresponds to the optimal number
         :param calc_coeff: if True, estimate deterministic coefficients. Otherwise, construct only KKT normal equations for sparse solvers
         :param return_coeff: if True, return coefficients of pce
 
@@ -172,7 +187,7 @@ def ols(self, pce=None, nvirtual=None, calc_coeff=True, return_coeff=True, n_PI=
         n_constraints = self.pde_data.nconst
         card_basis, nvar = multindex.shape
 
-        if nvirtual is None:
+        if nvirtual == -1:
             nvirtual = card_basis - n_constraints
 
         if nvirtual < 0:
@@ -222,8 +237,8 @@ def ols(self, pce=None, nvirtual=None, calc_coeff=True, return_coeff=True, n_PI=
                 bc_res = samples[:, -1]
 
                 coord_s = polynomial_chaos.Polynomials.standardize_sample(coord_x, pce.polynomial_basis.distributions)
-                ac = self.derivative_basis(coord_s, pce, der_order=self.pde_data.der_orders[i],
-                                           variable=leadvariable)
+                ac = utils.derivative_basis(coord_s, pce, derivative_order=self.pde_data.der_orders[i],
+                                            leading_variable=int(leadvariable))
 
                 a_const.append(ac)
                 b_const.append(bc_res.reshape(-1, 1))
@@ -255,68 +270,12 @@ def ols(self, pce=None, nvirtual=None, calc_coeff=True, return_coeff=True, n_PI=
             if not return_coeff:
                 self.initial_pce.coefficients = a_opt_c
                 if self.pde_pce.virt_func is None:
-                    verif_S = utils.ortho_grid(n_PI, pce.inputs_number, -1, 1)
+                    standardized_sample = utils.ortho_grid(n_PI, pce.inputs_number, -1.0, 1.0)
                 else:
                     virtual_x = self.pde_pce.virt_func(n_PI)
-                    verif_S = polynomial_chaos.Polynomials.standardize_sample(virtual_x,
+                    standardized_sample = polynomial_chaos.Polynomials.standardize_sample(virtual_x,
                                                                               pce.polynomial_basis.distributions)
-                err = self.estimate_error(self.initial_pce, verif_S)
+                err = self.estimate_error(self.initial_pce, standardized_sample)
                 self.ols_err = err
             else:
                 return a_opt_c
-
-    # def ols_iter(self,):
-
-    def derivative_basis(self, s, pce=None, der_order=0, variable=None):
-
-        if pce is None:
-            multindex = self.initial_pce.multi_index_set
-            joint_distribution = self.initial_pce.polynomial_basis.distributions
-        else:
-            multindex = pce.multi_index_set
-            joint_distribution = pce.polynomial_basis.distributions
-
-        card_basis, nvar = multindex.shape
-
-        if nvar == 1:
-            marginals = [joint_distribution]
-        else:
-            marginals = joint_distribution.marginals
-
-        mask_herm = [type(marg) == Normal for marg in marginals]
-        mask_lege = [type(marg) == Uniform for marg in marginals]
-
-        if variable is not None:
-
-            ns = multindex[:, variable]
-            polysd = []
-
-            if mask_lege[variable]:
-
-                for n in ns:
-                    polysd.append(legendre(n).deriv(der_order))
-
-                prep_l_deriv = np.sqrt((2 * multindex[:, variable] + 1)).reshape(-1, 1)
-
-                prep_deriv = []
-                for poly in polysd:
-                    prep_deriv.append(np.polyval(poly, s[:, variable]).reshape(-1, 1))
-
-                prep_deriv = np.array(prep_deriv)
-
-            mask_herm[variable] = False
-            mask_lege[variable] = False
-
-        prep_hermite = sp.eval_hermitenorm(multindex[:, mask_herm][:, np.newaxis, :], s[:, mask_herm])
-        prep_legendre = sp.eval_legendre(multindex[:, mask_lege][:, np.newaxis, :], s[:, mask_lege])
-
-        prep_fact = np.sqrt(sp.factorial(multindex[:, mask_herm]))
-        prep = np.sqrt((2 * multindex[:, mask_lege] + 1))
-
-        multivariate_basis = np.prod(prep_hermite / prep_fact[:, np.newaxis, :], axis=2).T
-        multivariate_basis *= np.prod(prep_legendre * prep[:, np.newaxis, :], axis=2).T
-
-        if variable is not None:
-            multivariate_basis *= np.prod(prep_deriv * prep_l_deriv[:, np.newaxis, :], axis=2).T
-
-        return multivariate_basis
diff --git a/src/UQpy/surrogates/polynomial_chaos/physics_informed/PdeData.py b/src/UQpy/surrogates/polynomial_chaos/physics_informed/PdeData.py
index 1e03929e0..b31cf336d 100644
--- a/src/UQpy/surrogates/polynomial_chaos/physics_informed/PdeData.py
+++ b/src/UQpy/surrogates/polynomial_chaos/physics_informed/PdeData.py
@@ -1,5 +1,6 @@
 import numpy as np
 
+
 class PdeData:
     def __init__(self, geometry_xmax, geometry_xmin, der_orders, bc_normals, bc_x, bc_y):
         """
@@ -73,4 +74,3 @@ def get_bcsamples(self, order):
                 bcsamples = np.c_[coord, value]
 
         return bcsamples
-
diff --git a/src/UQpy/surrogates/polynomial_chaos/physics_informed/PdePCE.py b/src/UQpy/surrogates/polynomial_chaos/physics_informed/PdePCE.py
index c78a12587..f178c3aeb 100644
--- a/src/UQpy/surrogates/polynomial_chaos/physics_informed/PdePCE.py
+++ b/src/UQpy/surrogates/polynomial_chaos/physics_informed/PdePCE.py
@@ -1,6 +1,6 @@
 import numpy as np
 
-class PdePce:
+class PdePCE:
     def __init__(self, pde_data, pde_func, pde_res=None, bc_res=None, bc_func=None, virt_func=None, nonlinear=False):
         """
         Class containing information about PDE needed for physics-informed PCE
diff --git a/src/UQpy/surrogates/polynomial_chaos/physics_informed/ReducedPCE.py b/src/UQpy/surrogates/polynomial_chaos/physics_informed/ReducedPCE.py
index 760154465..53632a8f8 100644
--- a/src/UQpy/surrogates/polynomial_chaos/physics_informed/ReducedPCE.py
+++ b/src/UQpy/surrogates/polynomial_chaos/physics_informed/ReducedPCE.py
@@ -2,35 +2,34 @@
 from UQpy.surrogates import *
 import copy
 import UQpy.surrogates.polynomial_chaos.physics_informed.Utilities as utils
+from beartype import beartype
 
-
-
-class ReducedPce:
-
-    def __init__(self, pce, n_det, determ_pos=None):
+class ReducedPCE:
+    @beartype
+    def __init__(self, pce: PolynomialChaosExpansion, n_deterministic: int, deterministic_positions: list = None):
         """
         Class to create a reduced PCE filtering out deterministic input variables (e.g. geometry)
 
         :param pce: an object of the :py:meth:`UQpy` :class:`PolynomialChaosExpansion` class
-        :param n_det: number of deterministic input variables
-        :param determ_pos: list of positions of deterministic random variables in input vector.
-        If None, positions are assumed to be the first n_det variables.
+        :param n_deterministic: number of deterministic input variables
+        :param deterministic_positions: list of positions of deterministic random variables in input vector.
+        If None, positions are assumed to be the first n_deterministic variables.
""" - if determ_pos is None: - determ_pos = list(np.arange(n_det)) + if deterministic_positions is None: + deterministic_positions = list(np.arange(n_deterministic)) self.original_multindex = pce.multi_index_set self.original_beta = pce.coefficients self.original_P, self.nvar = self.original_multindex.shape self.original_pce = pce - self.determ_pos = determ_pos + self.determ_pos = deterministic_positions # select basis containing only deterministic variables determ_multi_index = np.zeros(self.original_multindex.shape) determ_selection_mask = [False] * self.nvar - for i in determ_pos: + for i in deterministic_positions: determ_selection_mask[i] = True determ_multi_index[:, determ_selection_mask] = self.original_multindex[:, determ_selection_mask] @@ -43,7 +42,7 @@ def __init__(self, pce, n_det, determ_pos=None): reduced_multi_mask = self.original_multindex > 0 reduced_var_mask = [True] * self.nvar - for i in determ_pos: + for i in deterministic_positions: reduced_var_mask[i] = False reduced_multi_mask = reduced_multi_mask * reduced_var_mask @@ -60,13 +59,14 @@ def __init__(self, pce, n_det, determ_pos=None): P_unique, nrand = unique_basis.shape self.unique_basis = np.concatenate((np.zeros((1, nrand)), unique_basis), axis=0) - def eval_coord(self, coordinates, return_coeff=False): + @beartype + def evaluate_coordinate(self, coordinates: np.ndarray, return_coefficients: bool = False): """ Evaluate reduced PCE coefficients for given deterministic coordinates. :param coordinates: deterministic coordinates for evaluation of reduced PCE - :param return_coeff: if True, return a vector of deterministic coefficients, else return Mean and Variance + :param return_coefficients: if True, return a vector of deterministic coefficients, else return Mean and Variance :return: mean and variance, or a vector of deterministic coefficients if return_coeff """ @@ -77,36 +77,19 @@ def eval_coord(self, coordinates, return_coeff=False): self.determ_multi_index, self.determ_basis, self.original_pce.polynomial_basis.distributions).evaluate_basis( coord_x) - determ_beta = np.transpose(determ_basis_eval * self.original_beta) + return self._unique_coefficients(determ_basis_eval, return_coefficients) - reduced_beta = determ_beta[self.reduced_positions] - complement_beta = determ_beta[~self.reduced_positions] - - unique_beta = [] - for ind in np.unique(self.unique_indices): - sum_beta = np.sum(reduced_beta[self.unique_indices == ind, 0]) - unique_beta.append(sum_beta) - - unique_beta = np.array([0] + unique_beta) - unique_beta[0] = unique_beta[0] + np.sum(complement_beta) - - if not return_coeff: - mean = unique_beta[0] - var = np.sum(unique_beta[1:] ** 2) - return mean, var - else: - return unique_beta - - def derive_coord(self, coordinates, der_order, der_var, der_multiplier=2, return_coeff=False): + @beartype + def derive_coordinate(self, coordinates: np.ndarray, derivative_order: int, leading_variable: int, derivative_multiplier: float = 1, return_coefficients: bool = False): """ Evaluate derivative of reduced PCE coefficients for given deterministic coordinates. 
 
         :param coordinates: deterministic coordinates for evaluation of reduced PCE
-        :param der_order: derivation order of reduced PCE
-        :param der_var: leading variable for derivation
-        :param der_multiplier: multiplier reflecting different sizes of the original physical and transformed spaces
-        :param return_coeff: if True, return a vector of deterministic coefficients, else return Mean and Variance
+        :param derivative_order: derivation order of reduced PCE
+        :param leading_variable: leading variable for derivation
+        :param derivative_multiplier: multiplier reflecting different sizes of the original physical and transformed spaces
+        :param return_coefficients: if True, return a vector of deterministic coefficients, else return Mean and Variance
 
-        :return: mean and variance, or a vector of deterministic coefficients if return_coeff
+        :return: mean and variance, or a vector of deterministic coefficients if return_coefficients is True
         """
 
         coord_x = np.array(coordinates)
@@ -123,33 +106,18 @@ def derive_coord(self, coordinates, der_order, der_var, der_multiplier=2, return
         pce_deriv = copy.deepcopy(self.original_pce)
         pce_deriv.multi_index_set = determ_multi_index
-        determ_basis_eval = utils.derivative_basis(coord_s, pce_deriv, der_order=der_order, variable=der_var) * (
-                der_multiplier ** der_order)
-
-        determ_beta = np.transpose(determ_basis_eval * self.original_beta)
-
-        reduced_beta = determ_beta[self.reduced_positions]
-        complement_beta = determ_beta[~self.reduced_positions]
-
-        unique_beta = []
-        for ind in np.unique(self.unique_indices):
-            sum_beta = np.sum(reduced_beta[self.unique_indices == ind, 0])
-            unique_beta.append(sum_beta)
+        determ_basis_eval = utils.derivative_basis(coord_s, pce_deriv, derivative_order=derivative_order,
+                                                   leading_variable=leading_variable) * (
+                                    derivative_multiplier ** derivative_order)
 
-        unique_beta = np.array([0] + unique_beta)
-        unique_beta[0] = unique_beta[0] + np.sum(complement_beta)
+        return self._unique_coefficients(determ_basis_eval, return_coefficients)
 
-        if not return_coeff:
-            mean = unique_beta[0]
-            var = np.sum(unique_beta[1:] ** 2)
-            return mean, var
-        else:
-            return unique_beta
-
-    def variance_contributions(self, unique_beta):
+    @beartype
+    def variance_contributions(self, unique_beta: np.ndarray):
         """
-        Get first order conditional variances from coefficients of reduced PCE evaluated in specific deterministic coordinates
+        Get first order conditional variances from coefficients of reduced PCE evaluated in the specified deterministic
+        physical coordinates
 
         :param unique_beta: vector of reduced PCE coefficients
         :return: first order conditional variances associated to each input random variable
@@ -172,3 +140,24 @@ def variance_contributions(self, unique_beta):
             variances[nn] = variance_contribution
 
         return variances
+
+    def _unique_coefficients(self, determ_basis_eval, return_coefficients):
+        determ_beta = np.transpose(determ_basis_eval * self.original_beta)
+
+        reduced_beta = determ_beta[self.reduced_positions]
+        complement_beta = determ_beta[~self.reduced_positions]
+
+        unique_beta = []
+        for ind in np.unique(self.unique_indices):
+            sum_beta = np.sum(reduced_beta[self.unique_indices == ind, 0])
+            unique_beta.append(sum_beta)
+
+        unique_beta = np.array([0] + unique_beta)
+        unique_beta[0] = unique_beta[0] + np.sum(complement_beta)
+
+        if not return_coefficients:
+            mean = unique_beta[0]
+            var = np.sum(unique_beta[1:] ** 2)
+            return mean, var
+        else:
+            return unique_beta
diff --git a/src/UQpy/surrogates/polynomial_chaos/physics_informed/Utilities.py b/src/UQpy/surrogates/polynomial_chaos/physics_informed/Utilities.py
index a3f74cfa2..279609cd9 100644
--- a/src/UQpy/surrogates/polynomial_chaos/physics_informed/Utilities.py
+++ b/src/UQpy/surrogates/polynomial_chaos/physics_informed/Utilities.py
@@ -1,54 +1,87 @@
 import numpy as np
+import UQpy.distributions.baseclass.Distribution
 from UQpy.distributions.collection import Uniform, Normal
 from scipy import special as sp
 from scipy.special import legendre
+from beartype import beartype
+from UQpy.surrogates.polynomial_chaos.physics_informed.PdeData import PdeData
+from UQpy.surrogates import *
 
-def transformation_multiplier(original_geometry, var, derivation_order=1):
+
+@beartype
+def transformation_multiplier(data_object: PdeData, leading_variable, derivation_order=1):
     """
     Get transformation multiplier for derivatives of PCE basis functions (assuming Uniform distribution)
 
-    :param original_geometry: number of samples per dimension, i.e. total number is n**nvar
-    :param var: number of dimensions
+    :param data_object: :py:meth:`UQpy` :class:`PdeData` class containing geometry of physical space
+    :param leading_variable: leading variable for derivation
     :param derivation_order: order of derivative
-    :return: multiplier reflecting a different sizes of physical and standardized spaces
+    :return: multiplier reflecting the different sizes of the physical and standardized spaces
     """
-    size = np.abs(original_geometry.xmax[var] - original_geometry.xmin[var])
-    multplier = (2 / size) ** derivation_order
+    size = np.abs(data_object.xmax[leading_variable] - data_object.xmin[leading_variable])
+    multiplier = (2 / size) ** derivation_order
 
-    return multplier
+    return multiplier
 
 
-def ortho_grid(n, nvar, xmin=-1, xmax=1):
+@beartype
+def ortho_grid(n_samples: int, nvar: int, x_min: float = -1, x_max: float = 1):
     """
     Create orthogonal grid of samples.
 
-    :param n: number of samples per dimension, i.e. total number is n**nvar
+    :param n_samples: number of samples per dimension, i.e. total number is n_samples**nvar
     :param nvar: number of dimensions
-    :param xmin: lower bound of hypercube
-    :param xmax: upper bound of hypercube
+    :param x_min: lower bound of hypercube
+    :param x_max: upper bound of hypercube
     :return: generated grid of samples
     """
-    xrange = (xmax - xmin) / 2
-    nsim = n ** nvar
-    x = np.linspace(xmin + xrange / n, xmax - xrange / n, n)
+    xrange = (x_max - x_min) / 2
+    nsim = n_samples ** nvar
+    x = np.linspace(x_min + xrange / n_samples, x_max - xrange / n_samples, n_samples)
     x_list = [x] * nvar
     X = np.meshgrid(*x_list)
     grid = np.array(X).reshape((nvar, nsim)).T
     return grid
 
 
-def derivative_basis(s, pce, der_order=0, variable=None):
+@beartype
+def derivative_basis(standardized_sample: np.ndarray, pce: PolynomialChaosExpansion, derivative_order: int,
+                     leading_variable: int):
     """
-    Create orthogonal grid of samples.
-    :param s: samples in standardized space for an evaluation of derived basis
+    Evaluate derivative basis of given pce object.
+    :param standardized_sample: samples in standardized space for an evaluation of derived basis
     :param pce: an object of the :py:meth:`UQpy` :class:`PolynomialChaosExpansion` class
-    :param der_order: order of derivative
-    :param variable: leading variable of derivatives
+    :param derivative_order: order of derivative
+    :param leading_variable: leading variable of derivatives
     :return: evaluated derived basis
     """
 
-    multindex = pce.multi_index_set
-    joint_distribution = pce.polynomial_basis.distributions
+    if derivative_order >= 0:
+        multindex = pce.multi_index_set
+        joint_distribution = pce.polynomial_basis.distributions
+
+        multivariate_basis = construct_basis(standardized_sample, multindex, joint_distribution, derivative_order,
+                                             leading_variable)
+    else:
+        raise Exception('derivative_basis function is defined only for non-negative derivative_order!')
+
+    return multivariate_basis
+
+
+@beartype
+def construct_basis(standardized_sample: np.ndarray, multindex: np.ndarray,
+                    joint_distribution: UQpy.distributions.baseclass.Distribution,
+                    derivative_order: int = 0, leading_variable: int = 0):
+    """
+    Construct and evaluate derivative basis.
+    :param standardized_sample: samples in standardized space for an evaluation of derived basis
+    :param multindex: set of multi-indices corresponding to polynomial orders in basis set
+    :param joint_distribution: joint probability distribution of input variables,
+     an object of the :py:meth:`UQpy` :class:`Distribution` class
+    :param derivative_order: order of derivative
+    :param leading_variable: leading variable of derivatives
+    :return: evaluated derived basis
+    """
 
     card_basis, nvar = multindex.shape
 
@@ -59,29 +92,29 @@ def derivative_basis(s, pce, der_order=0, variable=None):
     mask_herm = [type(marg) == Normal for marg in marginals]
     mask_lege = [type(marg) == Uniform for marg in marginals]
 
-    if variable is not None:
+    if derivative_order >= 0:
 
-        ns = multindex[:, variable]
+        ns = multindex[:, leading_variable]
         polysd = []
 
-        if mask_lege[variable]:
+        if mask_lege[leading_variable]:
 
             for n in ns:
-                polysd.append(legendre(n).deriv(der_order))
+                polysd.append(legendre(n).deriv(derivative_order))
 
-            prep_l_deriv = np.sqrt((2 * multindex[:, variable] + 1)).reshape(-1, 1)
+            prep_l_deriv = np.sqrt((2 * multindex[:, leading_variable] + 1)).reshape(-1, 1)
 
             prep_deriv = []
             for poly in polysd:
-                prep_deriv.append(np.polyval(poly, s[:, variable]).reshape(-1, 1))
+                prep_deriv.append(np.polyval(poly, standardized_sample[:, leading_variable]).reshape(-1, 1))
 
             prep_deriv = np.array(prep_deriv)
 
-        mask_herm[variable] = False
-        mask_lege[variable] = False
+        mask_herm[leading_variable] = False
+        mask_lege[leading_variable] = False
 
-    prep_hermite = sp.eval_hermitenorm(multindex[:, mask_herm][:, np.newaxis, :], s[:, mask_herm])
-    prep_legendre = sp.eval_legendre(multindex[:, mask_lege][:, np.newaxis, :], s[:, mask_lege])
+    prep_hermite = sp.eval_hermitenorm(multindex[:, mask_herm][:, np.newaxis, :], standardized_sample[:, mask_herm])
+    prep_legendre = sp.eval_legendre(multindex[:, mask_lege][:, np.newaxis, :], standardized_sample[:, mask_lege])
 
     prep_fact = np.sqrt(sp.factorial(multindex[:, mask_herm]))
     prep = np.sqrt((2 * multindex[:, mask_lege] + 1))
@@ -89,9 +122,7 @@ def derivative_basis(s, pce, der_order=0, variable=None):
     multivariate_basis = np.prod(prep_hermite / prep_fact[:, np.newaxis, :], axis=2).T
     multivariate_basis *= np.prod(prep_legendre * prep[:, np.newaxis, :], axis=2).T
 
-    if variable is not None:
+    if leading_variable is not None:
         multivariate_basis *= np.prod(prep_deriv * prep_l_deriv[:, np.newaxis, :], axis=2).T
 
     return multivariate_basis
-
-
diff --git a/src/UQpy/surrogates/polynomial_chaos/regressions/LeastAngleRegression.py b/src/UQpy/surrogates/polynomial_chaos/regressions/LeastAngleRegression.py
index 99224b88b..0d008b3f0 100644
--- a/src/UQpy/surrogates/polynomial_chaos/regressions/LeastAngleRegression.py
+++ b/src/UQpy/surrogates/polynomial_chaos/regressions/LeastAngleRegression.py
@@ -65,7 +65,7 @@ def model_selection(pce_object: PolynomialChaosExpansion, target_error=1, check_
     measured by Cross validation: Leave-one-out error (1 is perfect approximation). Option to check overfitting by
     empirical rule: if three steps in a row have a decreasing accuracy, stop the algorithm.
 
-    :param PolynomialChaosExpansion: existing target PCE for model_selection
+    :param pce_object: existing target PCE for model_selection
-    :param target_error: Target error of an approximation (stoping criterion).
+    :param target_error: Target error of an approximation (stopping criterion).
     :param check_overfitting: Whether to check over-fitting by empirical rule.
     :return: copy of input PolynomialChaosExpansion containing the best possible model for given data identified by LARs
diff --git a/tests/unit_tests/surrogates/test_pce.py b/tests/unit_tests/surrogates/test_pce.py
index 7e03b66a1..1cb300c57 100644
--- a/tests/unit_tests/surrogates/test_pce.py
+++ b/tests/unit_tests/surrogates/test_pce.py
@@ -11,11 +11,11 @@
 from UQpy.surrogates.polynomial_chaos.polynomials.TotalDegreeBasis import TotalDegreeBasis
 from UQpy.surrogates.polynomial_chaos.polynomials.TensorProductBasis import TensorProductBasis
 # load PC^2
-from UQpy.surrogates.polynomial_chaos.physics_informed.ConstrainedPCE import ConstrainedPce
+from UQpy.surrogates.polynomial_chaos.physics_informed.ConstrainedPCE import ConstrainedPCE
 from UQpy.surrogates.polynomial_chaos.physics_informed.PdeData import PdeData
-from UQpy.surrogates.polynomial_chaos.physics_informed.PdePCE import PdePce
+from UQpy.surrogates.polynomial_chaos.physics_informed.PdePCE import PdePCE
 from UQpy.surrogates.polynomial_chaos.physics_informed.Utilities import *
-from UQpy.surrogates.polynomial_chaos.physics_informed.ReducedPCE import ReducedPce
+from UQpy.surrogates.polynomial_chaos.physics_informed.ReducedPCE import ReducedPCE
 
 np.random.seed(1)
 max_degree, n_samples = 2, 10
@@ -444,57 +444,42 @@ def test_23():
     ref_pdf = ref_pdf1 * ref_pdf2
 
     assert (standardized_samples == ref_sample).all() and (standardized_pdf == ref_pdf).all()
 
+
 def test_24():
     """
     Test Physics-Informed PCE of Euler-Bernoulli Beam and UQ
     """
 
-    # Definition of PDE/ODE in context of PC^2
-
     # Definition of PDE/ODE
-    def pde_func(S, pce):
+    def pde_func(standardized_sample, pce):
         der_order = 4
-        deriv_0_PCE = derivative_basis(S, pce, der_order=der_order, variable=0) * ((2 / 1) ** der_order)
+        deriv_pce = derivative_basis(standardized_sample, pce, derivative_order=der_order, leading_variable=0) * (
+                (2 / 1) ** der_order)
 
-        pde_basis = deriv_0_PCE
+        pde_basis = deriv_pce
 
         return pde_basis
 
     # Definition of the source term
-    def pde_res(S):
-        load_s = Load(S)
+    def pde_res(standardized_sample):
+        load_s = load(standardized_sample)
         return -load_s[:, 0]
 
-    def Load(S):
-        return Const_Load(S)
+    def load(standardized_sample):
+        return const_load(standardized_sample)
 
-    def Const_Load(S):
-        l = (1 + (1 + S[:, 1]) / 2).reshape(-1, 1)
+    def const_load(standardized_sample):
+        l = (1 + (1 + standardized_sample[:, 1]) / 2).reshape(-1, 1)
         return l
 
-    # define sampling and evaluation of BC for estimation of error
-    def bc_res(nsim, pce):
-        bc_x = np.zeros((2, 2))
-        bc_x[1, 0] = 1
-        bc_s = polynomial_chaos.Polynomials.standardize_sample(bc_x, pce.polynomial_basis.distributions)
-
-        der_order = 2
-        deriv_0_PCE = np.sum(
-            derivative_basis(bc_s, pce, der_order=der_order, variable=0) * ((2 / 1) ** der_order) * np.array(
-                pce.coefficients).T, axis=1)
-
-        return deriv_0_PCE
-
-    def ref_sol(x, q):
-        return (q + 1) * (-(x ** 4) / 24 + x ** 3 / 12 - x / 24)
-
+    # Definition of the function for sampling of boundary conditions
     def bc_sampling(nsim=1000):
         # BC sampling
         nsim_half = round(nsim / 2)
         sample = np.zeros((nsim, 2))
-        real_ogrid_1d = ortho_grid(nsim_half, 1, 0, 1)[:, 0]
+        real_ogrid_1d = ortho_grid(nsim_half, 1, 0.0, 1.0)[:, 0]
 
         sample[:nsim_half, 0] = np.zeros(nsim_half)
         sample[:nsim_half, 1] = real_ogrid_1d
@@ -504,10 +489,30 @@ def bc_sampling(nsim=1000):
 
         return sample
 
-    # definition of the stochastic model
+    # define sampling and evaluation of BC for estimation of error
+    def bc_res(nsim, pce):
+        physical_sample = np.zeros((2, 2))
+        physical_sample[1, 0] = 1
+        standardized_sample = polynomial_chaos.Polynomials.standardize_sample(physical_sample,
+                                                                              pce.polynomial_basis.distributions)
+
+        der_order = 2
+        deriv_pce = np.sum(
+            derivative_basis(standardized_sample, pce, derivative_order=der_order, leading_variable=0) *
+            ((2 / 1) ** der_order) * np.array(pce.coefficients).T, axis=1)
+
+        return deriv_pce
+
+    # Definition of the reference solution for an error estimation
+    def ref_sol(physical_coordinate, q):
+        return (q + 1) * (-(physical_coordinate ** 4) / 24 + physical_coordinate ** 3 / 12 - physical_coordinate / 24)
+
+    # number of input variables
+    nrand = 1
     nvar = 1 + nrand
-    least_squares = LeastSquareRegression()
+
+    # definition of a joint probability distribution
     dist1 = Uniform(loc=0, scale=1)
     dist2 = Uniform(loc=0, scale=1)
     marg = [dist1, dist2]
@@ -517,7 +522,8 @@
     geometry_xmin = np.array([0])
     geometry_xmax = np.array([1])
 
-    # prescribing BC of PDEs
+    # number of BC samples
+    nbc = 2 * 10
 
     # derivation orders of prescribed BCs
     der_orders = [0, 2]
 
     # sampling of BC points
     bc_xtotal = bc_sampling(20)
-
     bc_ytotal = np.zeros(len(bc_xtotal))
+
     bc_x = [bc_xtotal, bc_xtotal]
     bc_y = [bc_ytotal, bc_ytotal]
 
-    # construct object containing all PDE data
     pde_data = PdeData(geometry_xmax, geometry_xmin, der_orders, bc_normals, bc_x, bc_y)
 
-    # construct object containing PDE data and PC^2 definitions of PDE
-    pde_pce = PdePce(pde_data, pde_func, pde_res=pde_res, bc_res=bc_res)
+    pde_pce = PdePCE(pde_data, pde_func, pde_res=pde_res, bc_res=bc_res)
 
-    # extract dirichlet BC
     dirichlet_bc = pde_data.dirichlet
     x_train = dirichlet_bc[:, :-1]
     y_train = dirichlet_bc[:, -1]
 
-    # construct PC^2
-
+    least_squares = LeastSquareRegression()
     p = 9
     PCEorder = p
 
@@ -551,28 +553,29 @@
 
     # create initial PCE object containing basis, regression method and dirichlet BC
     initpce = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=least_squares)
-    initpce.set_ed(x_train, y_train)
+    initpce.set_data(x_train, y_train)
 
     # construct a PC^2 object combining pde_data, pde_pce and initial PCE objects
-    pcpc = ConstrainedPce(pde_data, pde_pce, initpce)
+    pcpc = ConstrainedPCE(pde_data, pde_pce, initpce)
 
     # get coefficients of PC^2 by least angle regression
     pcpc.lar()
+
+    # get coefficients of PC^2 by ordinary least squares
     pcpc.ols()
 
-    real_ogrid = ortho_grid(100, nvar, 0, 1)
+    # evaluate errors of approximations
+    real_ogrid = ortho_grid(100, nvar, 0.0, 1.0)
 
     yy_val_pce = pcpc.lar_pce.predict(real_ogrid).flatten()
     yy_val_pce_ols = pcpc.initial_pce.predict(real_ogrid).flatten()
     yy_val_true = ref_sol(real_ogrid[:, 0], real_ogrid[:, 1]).flatten()
 
     err = np.abs(yy_val_pce - yy_val_true)
     tot_err = np.sum(err)
-    print(tot_err)
 
     err_ols = np.abs(yy_val_pce_ols - yy_val_true)
     tot_err_ols = np.sum(err_ols)
-    print(tot_err_ols)
 
-    reduced_pce = ReducedPce(pcpc.lar_pce, n_det=1)
+    reduced_pce = ReducedPCE(pcpc.lar_pce, n_deterministic=1)
 
     coeff_res = []
     var_res = []
@@ -593,9 +596,11 @@ def bc_sampling(nsim=1000):
     uq = np.zeros((1 + n_derivations, nrand))
     for d in range(1 + n_derivations):
         if d == 0:
-            coeff = (reduced_pce.eval_coord(x, return_coeff=True))
+            coeff = (reduced_pce.evaluate_coordinate(np.array(x), return_coefficients=True))
         else:
-            coeff = reduced_pce.derive_coord(x, der_order=d, der_var=0, return_coeff=True)
+            coeff = reduced_pce.derive_coordinate(np.array(x), derivative_order=d, leading_variable=0,
+                                                  return_coefficients=True,
+                                                  derivative_multiplier=transformation_multiplier(pde_data, 0, d))
         mean[d] = coeff[0]
         var[d] = np.sum(coeff[1:] ** 2)
         variances[d, :] = reduced_pce.variance_contributions(coeff)
@@ -616,6 +621,5 @@ def bc_sampling(nsim=1000):
     lower_quantiles_modes = np.array(lower_quantiles_modes)
     upper_quantiles_modes = np.array(upper_quantiles_modes)
 
-    assert tot_err<10**-5 and tot_err_ols<10**-5 and round(mean_res[50, 4], 3) == -1.5 and round(mean_res[50, 3], 3) == 0 and round(vartot_res[50, 3], 3) == 0
-
-
+    assert tot_err < 10 ** -5 and tot_err_ols < 10 ** -5 and round(mean_res[50, 4], 3) == -1.5 and round(
+        mean_res[50, 3], 3) == 0 and round(vartot_res[50, 3], 3) == 0
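
Usage note: the sketch below condenses the renamed entry points exercised by docs/code/surrogates/pce/plot_pce_euler_UQ.py and test_24 above, namely set_data() in place of set_ed(), and ReducedPCE.evaluate_coordinate()/derive_coordinate() in place of ReducedPce.eval_coord()/derive_coord(). It is a minimal illustration of the new names, not a physics-informed run: it fits a plain unconstrained PCE to the beam reference solution instead of solving the constrained KKT system, and the import paths are assumed to follow the UQpy layout shown in this diff.

import numpy as np
from UQpy.distributions import Uniform, JointIndependent
from UQpy.surrogates.polynomial_chaos.PolynomialChaosExpansion import PolynomialChaosExpansion
from UQpy.surrogates.polynomial_chaos.polynomials.TotalDegreeBasis import TotalDegreeBasis
from UQpy.surrogates.polynomial_chaos.regressions.LeastSquareRegression import LeastSquareRegression
from UQpy.surrogates.polynomial_chaos.physics_informed.ReducedPCE import ReducedPCE
from UQpy.surrogates.polynomial_chaos.physics_informed.Utilities import ortho_grid


def ref_sol(x, q):
    # reference beam deflection under load intensity q (same as in the example above)
    return (q + 1) * (-(x ** 4) / 24 + x ** 3 / 12 - x / 24)


# first input is the deterministic coordinate x, second is the random load q
joint = JointIndependent(marginals=[Uniform(loc=0, scale=1), Uniform(loc=0, scale=1)])

# experimental design on an orthogonal grid of the physical domain [0, 1]^2
x_train = ortho_grid(20, 2, 0.0, 1.0)
y_train = ref_sol(x_train[:, 0], x_train[:, 1])

# fit() calls set_data() internally, which builds the design matrix from the basis
pce = PolynomialChaosExpansion(polynomial_basis=TotalDegreeBasis(distributions=joint, max_degree=9),
                               regression_method=LeastSquareRegression())
pce.fit(x_train, y_train)

# filter out the deterministic coordinate: UQ in q only, evaluated at fixed x
reduced_pce = ReducedPCE(pce, n_deterministic=1)
mean, variance = reduced_pce.evaluate_coordinate(np.array(0.5))

# slope du/dx at the same point; the multiplier 2.0 rescales the derivative from
# the standardized interval [-1, 1] back to the physical domain [0, 1]
d_mean, d_variance = reduced_pce.derive_coordinate(np.array(0.5), derivative_order=1,
                                                   leading_variable=0,
                                                   derivative_multiplier=2.0)
print(mean, variance, d_mean, d_variance)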