diff --git a/course-rom/_attachments/homework/problem-set-2.ipynb b/course-rom/_attachments/homework/problem-set-2.ipynb
index 71ab95d..926f3d2 100644
--- a/course-rom/_attachments/homework/problem-set-2.ipynb
+++ b/course-rom/_attachments/homework/problem-set-2.ipynb
@@ -1 +1 @@
-{"cells":[{"cell_type":"markdown","source":["# Problem Set 2: RB for Linear Affine Elliptic Problems\n\n","## Problem Statement — Design of a Thermal Fin\n\n","We consider the problem of designing the thermal fin described in Problem Set 1. In PS1 we looked at some theoretical issues (weak formulation and optimization formulation, convergence of the reduced basis approximation) and derived the necessary reduced basis quantities, i.e., expressions for $A_N ( \mu )$, $F_N$, and $L_N$. This problem set is devoted to implementing the reduced basis approximation and solving a simple design problem.\n","## Part 1 – Finite element approximation\n\n","We start by setting up the {feelpp} environment. The results will be stored in the directory `feelppdb`.\n"],"metadata":{}},{"cell_type":"code","execution_count":0,"metadata":{"slideshow":{"slide_type":"fragment"}},"outputs":[],"source":["import feelpp\n","from course_rom import laplacian\n","from course_rom.forms import *\n","import numpy as np\n","import json\n","import os\n","\n","d=os.getcwd()\n","print(f\"directory={d}\")\n","e=feelpp.Environment(['lap'],config=feelpp.localRepository(\".\"))\n"]},{"cell_type":"code","execution_count":0,"metadata":{"slideshow":{"slide_type":"fragment"}},"outputs":[],"source":["class Fin2D:\n","    def __init__(self,level='coarse'):\n","        self.level=level\n","        feelpp.Environment.setConfigFile(f\"{d}/src/cases/laplacian/fin/fin4/fin2d-{level}.cfg\")\n","        data = laplacian.loadSpecs(f\"{d}/src/cases/laplacian/fin/fin2d-{level}.json\")\n","        self.lap=laplacian.get(dim=2,order=1)\n","        self.lap.setSpecs(data)\n","        self.lap.initialize()\n","        print(f\"loaded {level} mesh with {self.lap.mesh().numGlobalElements()} elements\")\n","        self.decompose()\n","        self.a_thetas=[self.atheta0,self.atheta1,self.atheta2,self.atheta3,self.atheta4,self.atheta5]\n","        self.f_thetas=[self.ftheta0]\n","\n","    def decompose(self):\n","        # affine decomposition: one flux term on Gamma_root, one diffusion term per material,\n","        # and one boundary mass term on Gamma_ext (Biot/Robin contribution)\n","        self.Fq=[]\n","        for i,mat in enumerate(['Gamma_root']):\n","            self.Fq.append(self.lap.assembleFlux(markers=[mat],coeffs=1))\n","        self.Aq=[]\n","        for i,mat in enumerate(['Post', 'Fin_1', 'Fin_2', 'Fin_3', 'Fin_4']):\n","            C=np.array([[1,0],[0,1]])\n","            self.Aq.append(self.lap.assembleGradGrad(markers=[mat],coeffs=C))\n","        for i,mat in enumerate(['Gamma_ext']):\n","            self.Aq.append(self.lap.assembleMass(markers=[mat],coeffs=1))\n","    def Qa(self):\n","        return len(self.Aq)\n","    def Qf(self):\n","        return len(self.Fq)\n","    def atheta0(self,mu): return 1\n","    def atheta1(self,mu): return mu[0]\n","    def atheta2(self,mu): return mu[1]\n","    def atheta3(self,mu): return mu[2]\n","    def atheta4(self,mu): return mu[3]\n","    def atheta5(self,mu): return mu[4]\n","    def ftheta0(self,mu): return 1\n","\n","\n","    def a(self,mu):\n","        return sum( (theta(mu) * aq for theta, aq in zip(self.a_thetas, self.Aq)),start=form2(test=self.lap.Xh(),trial=self.lap.Xh()))\n","    def f(self,mu):\n","        return sum( (theta(mu) * fq for theta, fq in zip(self.f_thetas, self.Fq)),start=form1(test=self.lap.Xh()))\n","\n","    def solve(self,mu):\n","        u = self.lap.Xh().element()\n","        fmu=self.f(mu)\n","        self.a(mu).solve(solution=u,rhs=fmu,rebuild=True)\n","        return u,fmu(u)\n"]},{"cell_type":"markdown","source":["We will need the following two sampling
methods.\n"],"metadata":{}},{"cell_type":"code","execution_count":0,"metadata":{"slideshow":{"slide_type":"fragment"}},"outputs":[],"source":["def log_random_sample(bounds, num_samples=1):\n","    # bounds is a list of tuples (a, b) for each dimension\n","    log_bounds = [(np.log(a), np.log(b)) for a, b in bounds]\n","    samples = [np.exp(np.random.uniform(log_a, log_b, num_samples)) for log_a, log_b in log_bounds]\n","    return np.array(samples).T  # Transpose to get samples in shape (num_samples, dimensions)\n","\n","def log_equidistributed_sample(bounds, num_samples=1):\n","    # bounds is a list of tuples (a, b) for each dimension\n","    log_bounds = [(np.log(a), np.log(b)) for a, b in bounds]\n","    samples = [np.exp(np.linspace(log_a, log_b, num_samples)) for log_a, log_b in log_bounds]\n","    return np.array(samples).T  # Transpose to get samples in shape (num_samples, dimensions)\n"]},{"cell_type":"markdown","source":["We will need the following three grids.\n"],"metadata":{}},{"cell_type":"code","execution_count":0,"metadata":{"slideshow":{"slide_type":"fragment"}},"outputs":[],"source":["fin_coarse = Fin2D(level='coarse')\n","fin_medium = Fin2D(level='medium')\n","fin_fine = Fin2D(level='fine')\n"]},{"cell_type":"markdown","source":["We can print the size of the meshes using the following code.\n"],"metadata":{}},{"cell_type":"code","execution_count":0,"metadata":{"slideshow":{"slide_type":"fragment"}},"outputs":[],"source":["for fin in [fin_coarse,fin_medium, fin_fine]:\n","    print(f\"[{fin.level}] nelts: {fin.lap.mesh().numGlobalElements()}\")\n","\n","# k_i fixed at 1.5, Bi log-equidistributed in [0.01, 1]\n","sampling=log_equidistributed_sample([(1.5,1.5),(.01,1)], num_samples=20)\n","\n","Troots=dict()\n","for fin in [fin_coarse,fin_medium, fin_fine]:\n","    Troots[fin.level]=[]\n","    for i,sample in enumerate(sampling):\n","        mu=[sample[0],sample[0],sample[0],sample[0],sample[1]]\n","        u,s=fin.solve(mu)\n","        print(f\"s_{fin.level}({mu})={s}\")\n","        Troots[fin.level].append(s)\n","\n","print(f\"sampling={sampling[:,1]}\")\n"]},{"cell_type":"markdown","source":["We can plot the results using plotly.\n"],"metadata":{}},{"cell_type":"code","execution_count":0,"metadata":{"slideshow":{"slide_type":"fragment"}},"outputs":[],"source":["import plotly.graph_objects as go\n","import numpy as np\n","\n","fig = go.Figure()\n","for fin in [fin_coarse,fin_medium, fin_fine]:\n","    fig.add_trace(go.Scatter(\n","        x=sampling[:,1], y=Troots[fin.level], mode='lines+markers', name=f'Mean_{fin.level}_Gamma_root'))\n","fig.update_layout(title='Mean Temperature', xaxis_title='mu', yaxis_title='Temperature')\n","fig.show()\n"]},{"cell_type":"markdown","source":["## Part 2 – Reduced Basis Approximation\n\n","The point of departure for the reduced basis approximation is a high-dimensional finite element truth discretization. In the offline stage we require the finite element solution to build the reduced basis and we thus also need the FE matrices.
In this problem set we skip the FE assembly step and provide all of the necessary data for use in Python (see Appendix 1).\n","We saw in class that the reduced basis solution $u_N ( \mu ) \in \mathbb{R}^N$ satisfies the $N\times N$ linear system\n","\n$$\n A_N( \mu )u_N( \mu ) = F_N;\n$$\n","and that the output is given by\n","\n$$\n {T_{root}}_N ( \mu ) = L^T_N u_N ( \mu ).\n$$\n","We derived expressions for $A_N( \mu ) \in \mathbb{R}^{N\times N}$ in terms of $A_{\mathcal{N}}( \mu )$ and $Z$, $F_N \in \mathbb{R}^N$ in terms of $F_{\mathcal{N}}$ and $Z$, and $L_N \in \mathbb{R}^N$ in terms of $L_{\mathcal{N}}$ and $Z$; here $Z$ is an $\mathcal{N} \times N$ matrix, the $j$th column of which is $u_{\mathcal{N}} ( \mu_j )$ (the nodal values of $u_{\mathcal{N}} ( \mu_j )$). Finally, it follows from affine parameter dependence that $A_N ( \mu )$ can be expressed as\n","\n$$\nA_N( \mu ) = \sum_{q=1}^Q \Theta^q( \mu )A^q_N.\n$$\n","The goal is to implement an offline/online version of the reduced basis method following the computational decomposition indicated below.\n","\n","- Offline\n","\n","1. Choose $N$.\n","1. Choose the sample $S_N$.\n","1. Construct $Z$.\n","1. Construct $A^q_N, q = 1,\ldots,Q; F_N; \text{ and } L_N.$\n","\n","- Online\n","\n","1. Form $A_N ( \mu )$ from ([1.3](#eq:1.3)).\n","1. Solve $A_N( \mu )u_N( \mu ) = F_N.$\n","1. Evaluate the output ${T_{root}}_N ( \mu )$ from ([1.2](#eq:1.2)).\n","\n","The idea is that the offline stage is done only once, generating a small datafile with the $A^q_N , q = 1,\ldots,Q$, $F_N$, and $L_N$; the online stage then accesses this datafile to provide real-time response to new $\mu$ queries. For the required offline finite element calculations in this and the following questions, you should first use the coarse triangulation $\mathcal{T}_{h,\mathrm{coarse}}$.\n","\n","1. Show that the operation count for the online stage of your code is independent of $\mathcal{N}$. In particular, show that the operation count (number of floating-point operations) for the online stage, for each new $\mu$ of interest, can be expressed as\n","\n$$\nc_1 N^{\gamma_1} + c_2 N^{\gamma_2} + c_3 N^{\gamma_3},\n$$\n","for $c_1, c_2, c_3, \gamma_1, \gamma_2,$ and $\gamma_3$ independent of $\mathcal{N}$. Give values for the constants $c_1, c_2, c_3, \gamma_1, \gamma_2,$ and $\gamma_3$.\n","\n","1. We first consider a one-parameter ($P = 1$) problem. To this end, we keep the Biot number fixed at $Bi = 0.1$ and assume that the conductivities of all fins are equivalent, i.e., $k_1 = k_2 = k_3 = k_4$, but are allowed to vary between $0.1$ and $10$ – we thus have $\mu \in D = [0.1, 10]$. The sample set $S_N$ for $N_{max} = 8$ is given by log equidistributed sampling.\n","1. Generate the reduced basis matrix $Z$ and all necessary reduced basis quantities. You have two options: you can use the solution \"snapshots\" directly in $Z$ or perform a Gram-Schmidt orthonormalization to construct $Z$ (note that you require the $X$-inner product to perform Gram-Schmidt; here, we use $(\cdot, \cdot)_X = a(\cdot, \cdot; \mu )$, where $\mu = 1$ – all conductivities are $1$ and the Biot number is $0.1$). Calculate the condition number of $A_N ( \mu )$ for $N = 8$ and for $\mu = 1$ and $\mu = 10$ with and without Gram-Schmidt orthonormalization. What do you observe? Solve the reduced basis approximation (where you use the snapshots directly in $Z$) for $\mu_1 = 0.1$ and $N = 8$. What is $u_N( \mu_1)$? What do you expect $u_N( \mu_2)$ to look like for $\mu_2 = 10.0$?
What about $\\mu_3 = 1.0975$? Solve the Gram – Schmidt orthonormalized reduced basis approximation for $\\mu_1 = 0.1$ and $\\mu\n2 = 10$ for $N = 8$. What do you observe? Can you justify the result? For the remaining questions you should use the Gram – Schmidt orthonormalized reduced basis approximation.\n","\n","1. Verify that, for $\\mu = 1.5$ (recall that Biot is still fixed at $0.1$) and $N = 8$, the value of the output is ${T_{root}}_N ( \\mu ) = 1.61$ up to 2 digits.\n","1. We next introduce a regular test sample, $\\Xi_{test} \\subset D$, of size $ntest = 100$ (in Python you can simply use `linspace(0.1, 10, 100)` to generate $\\Xi_{test}$). Plot the convergence of the maximum relative error in the energy norm $\\max_{\\mu \\in\\Xi_{test}} |||u( \\mu ) -\nu_N ( \\mu )|||_\\mu /|||u( \\mu )|||_\\mu$ and the maximum relative output error max $\\mu \\in\\Xi_{test} |{T_{root}}( \\mu ) - {T_{root}} N( \\mu\n)|/{T_{root}}( \\mu )$ as a function of $N$ (use the Python command `semilogy` for plotting).\n","1. Compare the average CPU time over the test sample required to solve the reduced basis online stage with direct solution of the FE approximation as a function of $N$.\n","1. What value of $N$ do you require to achieve a relative accuracy in the output of 1%. What savings in terms of CPU time does this % correspond to?\n","1. Solve problems b) 3. to 5. using the medium and fine FE triangulation. Is the dependence on $\\mathcal{N}$ as you would anticipate?\n","\n","\n","1. Verify that, for $\\mu_0 = {0.4, 0.6, 0.8, 1.2, 0.15}$, i.e. $Bi = 0.15$, the value of the output is ${T_{root}}_N ( \\mu 0) = 1.61$.\n","1. We next introduce a regular test sample, $\\Xi_{test} \\subset D$, of size $ntest =100$ (in Python you can simply use `linspace(0.01, 1, 100)` to generate $\\Xi_{test}$). Plot the convergence of the maximum relative error in the energy norm $\\max_{\\mu \\in\\Xi_{test}} |||u( \\mu ) - u_N ( \\mu )|||_\\mu /|||u( \\mu\n)|||_\\mu$ and the maximum relative output error $\\max_{\\mu \\in\\Xi_{test}} |{T_{root}}( \\mu ) - {T_{root}}_N( \\mu )|/{T_{root}}(\n \\mu )$ as a function of $N$ (use the Python command `semilogy` for plotting).\n","1. The Biot number is directly related to the cooling method; higher cooling rates (higher $Bi$) imply lower (better) ${T_{root}}$ but also higher (worse) initial and operational costs. We can thus define (say) a total cost function as\n","\n$$\nC(Bi) = Bi + {T_{root}}(Bi),\n$$\n","minimization of which yields an optimal solution. Apply your (online) reduced – basis approx – imation for ${T_{root}}_N$ (that is, replace ${T_{root}}(Bi)$ in ([above](#eq:CBi)) with ${T_{root}}_N (Bi))$ to find the optimal $Bi.$ Any (simple) optimization procedure suffices for the minimization.\n","\n","1. We now consider another one parameter $(P = 1)$ problem. This time, we assume that the conductivities are fixed at $\\{k_1,k_2,k_3,k_4\\} = \\{0.4,0.6,0.8,1.2\\}$, and that only the Biot number, $Bi$, is allowed to vary from $0.01$ to $1$. The sample set $S_N$ for $N_{max} = 11$ is given by log equidistributed sampling. Generate an orthonormal $Z$ from the sample set using the medium triangulation.\n","1. We consider now a two parameter $(P = 2)$ problem where the conductivities are assumed to be equivalent, i.e., $k_1 = k_2 = k_3 = k_4$, but are allowed to vary between $0.1$ and $10$; and the Biot number, $Bi$, is allowed to vary from $0.01$ to $1$. The sample set $S_N$ for $N_{max} = 46$ is given by the log random sampling. 
Generate an orthonormal $Z$ from the sample set using the coarse triangulation.\n","1. We next introduce a regular grid, $\Xi_{test} \subset D$, of size $n_{\mathrm{test}} = 400$ (a regular $20 \times 20$ grid). Plot the convergence of the maximum relative error in the energy norm $\max_{\mu \in\Xi_{test}} |||u( \mu ) - u_N ( \mu )|||_\mu /|||u( \mu )|||_\mu$ and the maximum relative output error $\max_{\mu \in \Xi_{test}} |{T_{root}}( \mu ) - {T_{root}}_N( \mu )|/{T_{root}}( \mu )$ as a function of $N$.\n","1. We now consider the POD method and we wish to compare it with the Greedy approximation. To this end, we sample the parameter space ($P=2$) log randomly and take $n_{\mathrm{train}}=100$ samples. Build the POD approximation using these samples as the training set and compare the results with the Greedy approximation. Compute the RIC (relative information content) and the dimension of the POD space ($N$) such that the RIC is $99\%$ of the total energy. Plot the POD and Greedy convergence of the maximum relative error in the energy norm $\max_{\mu \in\Xi_{test}} |||u( \mu ) - u_N ( \mu )|||_\mu /|||u( \mu )|||_\mu$ and the maximum relative output error $\max_{\mu \in \Xi_{test}} |{T_{root}}( \mu ) - {T_{root}}_N( \mu )|/{T_{root}}( \mu )$ as a function of $N$.\n","1. Implement the parametrisation with respect to $L$ and $t$. The reference geometry is the one given by the `.geo` file and the corresponding $\hat{L}$ and $\hat{t}$. Plot the mean temperature ${T_{root}}( \mu )$ as a function of $t \in [0.1,0.5]$ with the other parameters set to $k_i=0.1, L=2.5, Bi=0.1$.\n","\n\n","## Appendix 1 – Finite Element Method Implementation\n\n","We use Feel++ to assemble the finite element matrices.\n"],"metadata":{}}],"metadata":{"language_info":{"name":"python","version":"3.9.1"}},"nbformat":4,"nbformat_minor":4}
\ No newline at end of file
+{"cells":[{"cell_type":"markdown","source":["# Problem Set 2: RB for Linear Affine Elliptic Problems\n\n","## Problem Statement — Design of a Thermal Fin\n\n","We consider the problem of designing the thermal fin described in Problem Set 1. In PS1 we looked at some theoretical issues (weak formulation and optimization formulation, convergence of the reduced basis approximation) and derived the necessary reduced basis quantities, i.e., expressions for $A_N ( \mu )$, $F_N$, and $L_N$. This problem set is devoted to implementing the reduced basis approximation and solving a simple design problem.\n","## Part 1 – Finite element approximation\n\n","We start by setting up the {feelpp} environment.
The results will be stored in the directory `feelppdb`.\n"],"metadata":{}},{"cell_type":"code","execution_count":0,"metadata":{"slideshow":{"slide_type":"fragment"}},"outputs":[],"source":["import feelpp\n","from course_rom import laplacian\n","from course_rom.forms import *\n","import numpy as np\n","import json\n","import os\n","\n","d=os.getcwd()\n","print(f\"directory={d}\")\n","e=feelpp.Environment(['lap'],config=feelpp.localRepository(\".\"))\n"]},{"cell_type":"code","execution_count":0,"metadata":{"slideshow":{"slide_type":"fragment"}},"outputs":[],"source":["class Fin2D:\n","    def __init__(self,level='coarse'):\n","        self.level=level\n","        feelpp.Environment.setConfigFile(f\"{d}/src/cases/laplacian/fin/fin4/fin2d-{level}.cfg\")\n","        data = laplacian.loadSpecs(f\"{d}/src/cases/laplacian/fin/fin2d-{level}.json\")\n","        self.lap=laplacian.get(dim=2,order=1)\n","        self.lap.setSpecs(data)\n","        self.lap.initialize()\n","        print(f\"loaded {level} mesh with {self.lap.mesh().numGlobalElements()} elements\")\n","        self.decompose()\n","        self.a_thetas=[self.atheta0,self.atheta1,self.atheta2,self.atheta3,self.atheta4,self.atheta5]\n","        self.f_thetas=[self.ftheta0]\n","\n","    def decompose(self):\n","        # affine decomposition: one flux term on Gamma_root, one diffusion term per material,\n","        # and one boundary mass term on Gamma_ext (Biot/Robin contribution)\n","        self.Fq=[]\n","        for i,mat in enumerate(['Gamma_root']):\n","            self.Fq.append(self.lap.assembleFlux(markers=[mat],coeffs=1))\n","        self.Aq=[]\n","        for i,mat in enumerate(['Post', 'Fin_1', 'Fin_2', 'Fin_3', 'Fin_4']):\n","            C=np.array([[1,0],[0,1]])\n","            self.Aq.append(self.lap.assembleGradGrad(markers=[mat],coeffs=C))\n","        for i,mat in enumerate(['Gamma_ext']):\n","            self.Aq.append(self.lap.assembleMass(markers=[mat],coeffs=1))\n","    def Qa(self):\n","        return len(self.Aq)\n","    def Qf(self):\n","        return len(self.Fq)\n","    def atheta0(self,mu): return 1\n","    def atheta1(self,mu): return mu[0]\n","    def atheta2(self,mu): return mu[1]\n","    def atheta3(self,mu): return mu[2]\n","    def atheta4(self,mu): return mu[3]\n","    def atheta5(self,mu): return mu[4]\n","    def ftheta0(self,mu): return 1\n","\n","\n","    def a(self,mu):\n","        return sum( (theta(mu) * aq for theta, aq in zip(self.a_thetas, self.Aq)),start=form2(test=self.lap.Xh(),trial=self.lap.Xh()))\n","    def f(self,mu):\n","        return sum( (theta(mu) * fq for theta, fq in zip(self.f_thetas, self.Fq)),start=form1(test=self.lap.Xh()))\n","\n","    def solve(self,mu):\n","        u = self.lap.Xh().element()\n","        fmu=self.f(mu)\n","        self.a(mu).solve(solution=u,rhs=fmu,rebuild=True)\n","        return u,fmu(u)\n"]},{"cell_type":"markdown","source":["We will need the following two sampling methods.\n"],"metadata":{}},{"cell_type":"code","execution_count":0,"metadata":{"slideshow":{"slide_type":"fragment"}},"outputs":[],"source":["def log_random_sample(bounds, num_samples=1):\n","    # bounds is a list of tuples (a, b) for each dimension\n","    log_bounds = [(np.log(a), np.log(b)) for a, b in bounds]\n","    samples = [np.exp(np.random.uniform(log_a, log_b, num_samples)) for log_a, log_b in log_bounds]\n","    return np.array(samples).T  # Transpose to get samples in shape (num_samples, dimensions)\n","\n","def log_equidistributed_sample(bounds, num_samples=1):\n","    # bounds is a list of tuples (a, b) for each dimension\n","    log_bounds = [(np.log(a), np.log(b)) for a, b in bounds]\n","    samples = [np.exp(np.linspace(log_a, log_b, num_samples)) for log_a, log_b in log_bounds]\n","    return np.array(samples).T  # Transpose to get samples in shape (num_samples, dimensions)\n"]},{"cell_type":"markdown","source":["We will need the following three
grids.\n"],"metadata":{}},{"cell_type":"code","execution_count":0,"metadata":{"slideshow":{"slide_type":"fragment"}},"outputs":[],"source":["fin_coarse = Fin2D(level='coarse')\n","fin_medium = Fin2D(level='medium')\n","fin_fine = Fin2D(level='fine')\n"]},{"cell_type":"markdown","source":["We can print the size of the meshes using the following code.\n"],"metadata":{}},{"cell_type":"code","execution_count":0,"metadata":{"slideshow":{"slide_type":"fragment"}},"outputs":[],"source":["for fin in [fin_coarse,fin_medium, fin_fine]:\n","    print(f\"[{fin.level}] nelts: {fin.lap.mesh().numGlobalElements()}\")\n","\n","# k_i fixed at 1.5, Bi log-equidistributed in [0.01, 1]\n","sampling=log_equidistributed_sample([(1.5,1.5),(.01,1)], num_samples=20)\n","\n","Troots=dict()\n","for fin in [fin_coarse,fin_medium, fin_fine]:\n","    Troots[fin.level]=[]\n","    for i,sample in enumerate(sampling):\n","        mu=[sample[0],sample[0],sample[0],sample[0],sample[1]]\n","        u,s=fin.solve(mu)\n","        print(f\"s_{fin.level}({mu})={s}\")\n","        Troots[fin.level].append(s)\n","\n","print(f\"sampling={sampling[:,1]}\")\n"]},{"cell_type":"markdown","source":["We can plot the results using plotly.\n"],"metadata":{}},{"cell_type":"code","execution_count":0,"metadata":{"slideshow":{"slide_type":"fragment"}},"outputs":[],"source":["import plotly.graph_objects as go\n","import numpy as np\n","\n","fig = go.Figure()\n","for fin in [fin_coarse,fin_medium, fin_fine]:\n","    fig.add_trace(go.Scatter(\n","        x=sampling[:,1], y=Troots[fin.level], mode='lines+markers', name=f'Mean_{fin.level}_Gamma_root'))\n","fig.update_layout(title='Mean Temperature', xaxis_title='mu', yaxis_title='Temperature')\n","fig.show()\n"]},{"cell_type":"markdown","source":["## Part 2 – Reduced Basis Approximation\n\n","The point of departure for the reduced basis approximation is a high-dimensional finite element truth discretization. In the offline stage we require the finite element solution to build the reduced basis and we thus also need the FE matrices. In this problem set we skip the FE assembly step and provide all of the necessary data for use in Python (see Appendix 1).\n","We saw in class that the reduced basis solution $u_N ( \mu ) \in \mathbb{R}^N$ satisfies the $N\times N$ linear system\n","\n$$\n A_N( \mu )u_N( \mu ) = F_N;\n$$\n","and that the output is given by\n","\n$$\n {T_{root}}_N ( \mu ) = L^T_N u_N ( \mu ).\n$$\n","We derived expressions for $A_N( \mu ) \in \mathbb{R}^{N\times N}$ in terms of $A_{\mathcal{N}}( \mu )$ and $Z$, $F_N \in \mathbb{R}^N$ in terms of $F_{\mathcal{N}}$ and $Z$, and $L_N \in \mathbb{R}^N$ in terms of $L_{\mathcal{N}}$ and $Z$; here $Z$ is an $\mathcal{N} \times N$ matrix, the $j$th column of which is $u_{\mathcal{N}} ( \mu_j )$ (the nodal values of $u_{\mathcal{N}} ( \mu_j )$). Finally, it follows from affine parameter dependence that $A_N ( \mu )$ can be expressed as\n","\n$$\nA_N( \mu ) = \sum_{q=1}^Q \Theta^q( \mu )A^q_N.\n$$\n","The goal is to implement an offline/online version of the reduced basis method following the computational decomposition indicated below.\n","\n","- Offline\n","\n","1. Choose $N$.\n","1. Choose the sample $S_N$.\n","1. Construct $Z$.\n","1. Construct $A^q_N, q = 1,\ldots,Q; F_N; \text{ and } L_N.$\n","\n","- Online\n","\n","1. Form $A_N ( \mu )$ from ([1.3](#eq:1.3)).\n","1. Solve $A_N( \mu )u_N( \mu ) = F_N.$\n","1. 
Evaluate the output ${T_{root}}_N ( \mu )$ from ([1.2](#eq:1.2)).\n","\n","The idea is that the offline stage is done only once, generating a small datafile with the $A^q_N , q = 1,\ldots,Q$, $F_N$, and $L_N$; the online stage then accesses this datafile to provide real-time response to new $\mu$ queries. For the required offline finite element calculations in this and the following questions, you should first use the coarse triangulation $\mathcal{T}_{h,\mathrm{coarse}}$.\n","\n","1. Show that the operation count for the online stage of your code is independent of $\mathcal{N}$. In particular, show that the operation count (number of floating-point operations) for the online stage, for each new $\mu$ of interest, can be expressed as\n","\n$$\nc_1 N^{\gamma_1} + c_2 N^{\gamma_2} + c_3 N^{\gamma_3},\n$$\n","for $c_1, c_2, c_3, \gamma_1, \gamma_2,$ and $\gamma_3$ independent of $\mathcal{N}$. Give values for the constants $c_1, c_2, c_3, \gamma_1, \gamma_2,$ and $\gamma_3$.\n","\n","1. We first consider a one-parameter ($P = 1$) problem. To this end, we keep the Biot number fixed at $Bi = 0.1$ and assume that the conductivities of all fins are equivalent, i.e., $k_1 = k_2 = k_3 = k_4$, but are allowed to vary between $0.1$ and $10$ – we thus have $\mu \in D = [0.1, 10]$. The sample set $S_N$ for $N_{max} = 8$ is given by log equidistributed sampling.\n","1. Generate the reduced basis matrix $Z$ and all necessary reduced basis quantities. You have two options: you can use the solution \"snapshots\" directly in $Z$ or perform a Gram-Schmidt orthonormalization to construct $Z$ (note that you require the $X$-inner product to perform Gram-Schmidt; here, we use $(\cdot, \cdot)_X = a(\cdot, \cdot; \mu )$, where $\mu = 1$ – all conductivities are $1$ and the Biot number is $0.1$). Calculate the condition number of $A_N ( \mu )$ for $N = 8$ and for $\mu = 1$ and $\mu = 10$ with and without Gram-Schmidt orthonormalization. What do you observe? Solve the reduced basis approximation (where you use the snapshots directly in $Z$) for $\mu_1 = 0.1$ and $N = 8$. What is $u_N( \mu_1)$? What do you expect $u_N( \mu_2)$ to look like for $\mu_2 = 10.0$? What about $\mu_3 = 1.0975$? Solve the Gram-Schmidt orthonormalized reduced basis approximation for $\mu_1 = 0.1$ and $\mu_2 = 10$ for $N = 8$. What do you observe? Can you justify the result? For the remaining questions you should use the Gram-Schmidt orthonormalized reduced basis approximation.\n","\n","1. Verify that, for $\mu = 1.5$ (recall that Biot is still fixed at $0.1$) and $N = 8$, the value of the output is ${T_{root}}_N ( \mu ) = 1.61$ up to 2 digits.\n","1. We next introduce a regular test sample, $\Xi_{test} \subset D$, of size $n_{\mathrm{test}} = 100$ (in Python you can simply use `linspace(0.1, 10, 100)` to generate $\Xi_{test}$). Plot the convergence of the maximum relative error in the energy norm $\max_{\mu \in\Xi_{test}} |||u( \mu ) - u_N ( \mu )|||_\mu /|||u( \mu )|||_\mu$ and the maximum relative output error $\max_{\mu \in\Xi_{test}} |{T_{root}}( \mu ) - {T_{root}}_N( \mu )|/{T_{root}}( \mu )$ as a function of $N$ (use the Python command `semilogy` for plotting).\n","1. Compare the average CPU time over the test sample required to solve the reduced basis online stage with direct solution of the FE approximation as a function of $N$.\n","1. What value of $N$ do you require to achieve a relative accuracy in the output of 1%?
What savings in terms of CPU time does this accuracy correspond to?\n","1. Solve problems b) 3. to 5. using the medium and fine FE triangulations. Is the dependence on $\mathcal{N}$ as you would anticipate?\n","\n","\n","1. We now consider another one-parameter $(P = 1)$ problem. This time, we assume that the conductivities are fixed at $\{k_1,k_2,k_3,k_4\} = \{0.4,0.6,0.8,1.2\}$, and that only the Biot number, $Bi$, is allowed to vary from $0.01$ to $1$. The sample set $S_N$ for $N_{max} = 11$ is given by log equidistributed sampling. Generate an orthonormal $Z$ from the sample set using the medium triangulation.\n","1. Verify that, for $\mu_0 = \{0.4, 0.6, 0.8, 1.2, 0.15\}$, i.e. $Bi = 0.15$, the value of the output is ${T_{root}}_N ( \mu_0 ) = 1.53$.\n","1. We next introduce a regular test sample, $\Xi_{test} \subset D$, of size $n_{\mathrm{test}} = 100$ (in Python you can simply use `linspace(0.01, 1, 100)` to generate $\Xi_{test}$). Plot the convergence of the maximum relative error in the energy norm $\max_{\mu \in\Xi_{test}} |||u( \mu ) - u_N ( \mu )|||_\mu /|||u( \mu )|||_\mu$ and the maximum relative output error $\max_{\mu \in\Xi_{test}} |{T_{root}}( \mu ) - {T_{root}}_N( \mu )|/{T_{root}}( \mu )$ as a function of $N$ (use the Python command `semilogy` for plotting).\n","1. The Biot number is directly related to the cooling method; higher cooling rates (higher $Bi$) imply lower (better) ${T_{root}}$ but also higher (worse) initial and operational costs. We can thus define (say) a total cost function as\n","\n$$\nC(Bi) = Bi + {T_{root}}(Bi),\n$$\n","minimization of which yields an optimal solution. Apply your (online) reduced basis approximation for ${T_{root}}_N$ (that is, replace ${T_{root}}(Bi)$ in ([above](#eq:CBi)) with ${T_{root}}_N (Bi)$) to find the optimal $Bi$. Any (simple) optimization procedure suffices for the minimization.\n","\n","1. We consider now a two-parameter $(P = 2)$ problem where the conductivities are assumed to be equivalent, i.e., $k_1 = k_2 = k_3 = k_4$, but are allowed to vary between $0.1$ and $10$; and the Biot number, $Bi$, is allowed to vary from $0.01$ to $1$. The sample set $S_N$ for $N_{max} = 46$ is given by log random sampling. Generate an orthonormal $Z$ from the sample set using the coarse triangulation.\n","1. We next introduce a regular grid, $\Xi_{test} \subset D$, of size $n_{\mathrm{test}} = 400$ (a regular $20 \times 20$ grid). Plot the convergence of the maximum relative error in the energy norm $\max_{\mu \in\Xi_{test}} |||u( \mu ) - u_N ( \mu )|||_\mu /|||u( \mu )|||_\mu$ and the maximum relative output error $\max_{\mu \in \Xi_{test}} |{T_{root}}( \mu ) - {T_{root}}_N( \mu )|/{T_{root}}( \mu )$ as a function of $N$.\n","1. We now consider the POD method and we wish to compare it with the Greedy approximation. To this end, we sample the parameter space ($P=2$) log randomly and take $n_{\mathrm{train}}=100$ samples. Build the POD approximation using these samples as the training set and compare the results with the Greedy approximation. Compute the RIC (relative information content) and the dimension of the POD space ($N$) such that the RIC is $99\%$ of the total energy. Plot the POD and Greedy convergence of the maximum relative error in the energy norm $\max_{\mu \in\Xi_{test}} |||u( \mu ) - u_N ( \mu )|||_\mu /|||u( \mu )|||_\mu$ and the maximum relative output error $\max_{\mu \in \Xi_{test}} |{T_{root}}( \mu ) - {T_{root}}_N( \mu )|/{T_{root}}( \mu )$ as a function of $N$.\n","1. 
Implement the parametrisation with respect to $L$ and $t$. The reference geometry is the one given by the `.geo` file and the corresponding $\hat{L}$ and $\hat{t}$. Plot the mean temperature ${T_{root}}( \mu )$ as a function of $t \in [0.1,0.5]$ with the other parameters set to $k_i=0.1, L=2.5, Bi=0.1$.\n","\n\n","## Appendix 1 – Finite Element Method Implementation\n\n","We use Feel++ to assemble the finite element matrices.\n"],"metadata":{}}],"metadata":{"language_info":{"name":"python","version":"3.9.1"}},"nbformat":4,"nbformat_minor":4}
\ No newline at end of file
diff --git a/course-rom/homework/problem-set-2.html b/course-rom/homework/problem-set-2.html
index fd4e0f7..0c25ced 100644
--- a/course-rom/homework/problem-set-2.html
+++ b/course-rom/homework/problem-set-2.html
@@ -275,7 +275,7 @@
-directory=/scratch/prudhomm/actions-runner-1/_work/course-rom/course-rom
+directory=/nvme0/prudhomm/github-actions/actions-runner-3/_work/course-rom/course-rom
@@ -646,7 +646,7 @@
   1.
-    Verify that, for \(\mu_0 = \{0.4, 0.6, 0.8, 1.2, 0.15\}\), i.e. \(Bi = 0.15\), the value of the output is \({T_{root}}_N ( \mu_0 ) = 1.61\).
+    Verify that, for \(\mu_0 = \{0.4, 0.6, 0.8, 1.2, 0.15\}\), i.e. \(Bi = 0.15\), the value of the output is \({T_{root}}_N ( \mu_0 ) = 1.53\).
2. We next introduce a regular test sample, \(\Xi_{test} \subset D\), of size \(n_{\mathrm{test}} = 100\) (in Python you can simply use linspace(0.01, 1, 100) to generate \(\Xi_{test}\)). Plot the convergence of the maximum relative error in the energy norm \(\max_{\mu \in\Xi_{test}} |||u( \mu ) - u_N ( \mu )|||_\mu /|||u( \mu )|||_\mu\) and the maximum relative output error \(\max_{\mu \in\Xi_{test}} |{T_{root}}( \mu ) - {T_{root}}_N( \mu )|/{T_{root}}( \mu )\) as a function of \(N\) (use the Python command semilogy for plotting).
diff --git a/sitemap.xml b/sitemap.xml
index 8202833..33ee71c 100644
--- a/sitemap.xml
+++ b/sitemap.xml
@@ -2,50 +2,50 @@
https://feelpp.github.io/course-rom/course-rom/env/antora.html
-2023-11-28T14:53:53.749Z
+2023-11-28T15:46:37.632Z
https://feelpp.github.io/course-rom/course-rom/env/cmake.html
-2023-11-28T14:53:53.749Z
+2023-11-28T15:46:37.632Z
https://feelpp.github.io/course-rom/course-rom/env/githubactions.html
-2023-11-28T14:53:53.749Z
+2023-11-28T15:46:37.632Z
https://feelpp.github.io/course-rom/course-rom/env/jupyter.html
-2023-11-28T14:53:53.749Z
+2023-11-28T15:46:37.632Z
https://feelpp.github.io/course-rom/course-rom/env/rename.html
-2023-11-28T14:53:53.749Z
+2023-11-28T15:46:37.632Z
https://feelpp.github.io/course-rom/course-rom/env/vscode.html
-2023-11-28T14:53:53.749Z
+2023-11-28T15:46:37.632Z
https://feelpp.github.io/course-rom/course-rom/homework/problem-set-1.html
-2023-11-28T14:53:53.749Z
+2023-11-28T15:46:37.632Z
https://feelpp.github.io/course-rom/course-rom/homework/problem-set-2.html
-2023-11-28T14:53:53.749Z
+2023-11-28T15:46:37.632Z
https://feelpp.github.io/course-rom/course-rom/homework/problem-set-3.html
-2023-11-28T14:53:53.749Z
+2023-11-28T15:46:37.632Z
https://feelpp.github.io/course-rom/course-rom/index.html
-2023-11-28T14:53:53.749Z
+2023-11-28T15:46:37.632Z
https://feelpp.github.io/course-rom/course-rom/overview.html
-2023-11-28T14:53:53.749Z
+2023-11-28T15:46:37.632Z
https://feelpp.github.io/course-rom/course-rom/quickstart.html
-2023-11-28T14:53:53.749Z
+2023-11-28T15:46:37.632Z
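The offline/online decomposition described in Part 2 of the notebook can be prototyped independently of Feel++. The following is a minimal NumPy sketch on synthetic data: the affine blocks `Aq`, the load `F`, and the output vector `L` stand in for the matrices assembled by `Fin2D`, and `theta`, `fe_solve`, and `rb_output` are illustrative names, not part of the `course_rom` API.

import numpy as np

rng = np.random.default_rng(0)
Nfe, Q, Nrb = 200, 3, 8                       # FE size, affine terms, RB size

# synthetic symmetric positive definite affine blocks A^q, load F, output L
Aq = [np.eye(Nfe) + 0.1 * (lambda M: M @ M.T)(rng.standard_normal((Nfe, Nfe)) / Nfe)
      for _ in range(Q)]
F = rng.standard_normal(Nfe)
L = F.copy()                                  # compliant output, as in the fin problem

def theta(mu):                                # affine coefficients Theta^q(mu)
    return np.array([1.0, mu, mu])

def fe_solve(mu):                             # "truth" solve at parameter mu
    A = sum(t * M for t, M in zip(theta(mu), Aq))
    return np.linalg.solve(A, F)

# offline: snapshots -> Z -> projected quantities (done once, independent of mu)
S = np.logspace(-1, 1, Nrb)                   # log equidistributed sample in [0.1, 10]
Z = np.column_stack([fe_solve(mu) for mu in S])
AqN = [Z.T @ M @ Z for M in Aq]               # A^q_N = Z^T A^q Z, q = 1, ..., Q
FN = Z.T @ F                                  # F_N = Z^T F
LN = Z.T @ L                                  # L_N = Z^T L

# online: cost O(Q N^2 + N^3) per query, independent of the FE dimension
def rb_output(mu):
    AN = sum(t * M for t, M in zip(theta(mu), AqN))
    uN = np.linalg.solve(AN, FN)
    return LN @ uN

print(rb_output(1.5), L @ fe_solve(1.5))      # RB output vs truth output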
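Question b)1 asks for a Gram-Schmidt orthonormalization of the snapshots in the $X$-inner product $(u, v)_X = u^T X v$ with $X = A( \mu )$ at $\mu = 1$, and for condition numbers of $A_N( \mu )$ with and without it. A sketch, reusing `Z`, `Aq`, and `theta` from the block above; with raw snapshots the reduced matrix is expected to become severely ill-conditioned as the snapshots grow nearly linearly dependent, while the orthonormalized basis keeps the condition number moderate.

X = sum(t * M for t, M in zip(theta(1.0), Aq))   # X-inner product matrix

def gram_schmidt_X(Z, X):
    # one pass of Gram-Schmidt in the X-inner product (X assumed SPD)
    Q_ = np.zeros_like(Z)
    for j in range(Z.shape[1]):
        v = Z[:, j] - Q_[:, :j] @ (Q_[:, :j].T @ (X @ Z[:, j]))  # remove projections
        Q_[:, j] = v / np.sqrt(v @ (X @ v))                      # X-normalize
    return Q_

Zgs = gram_schmidt_X(Z, X)

for mu in (1.0, 10.0):
    A = sum(t * M for t, M in zip(theta(mu), Aq))
    for name, B in (("raw snapshots", Z), ("Gram-Schmidt", Zgs)):
        print(f"mu={mu:5.1f}, {name:13s}: cond(A_N) = {np.linalg.cond(B.T @ A @ B):.3e}")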
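For the convergence studies (b)4 and the analogous later questions), the energy norm is computable directly from the FE matrix, $|||v|||_\mu = (v^T A( \mu ) v)^{1/2}$, and for a linear output the output error is $|L^T e|$. A sketch of the error sweep over a test sample, reusing `fe_solve`, `Zgs`, `Aq`, `theta`, `F`, and `L` from the blocks above:

import matplotlib.pyplot as plt

Xi_test = np.linspace(0.1, 10, 100)
Ns = range(1, Zgs.shape[1] + 1)
errs_u, errs_s = [], []
for N in Ns:
    ZN = Zgs[:, :N]
    eu = es = 0.0
    for mu in Xi_test:
        A = sum(t * M for t, M in zip(theta(mu), Aq))
        u = np.linalg.solve(A, F)                              # truth solution
        uN = ZN @ np.linalg.solve(ZN.T @ A @ ZN, ZN.T @ F)     # RB solution, lifted
        e = u - uN
        eu = max(eu, np.sqrt(e @ A @ e) / np.sqrt(u @ A @ u))  # energy-norm error
        es = max(es, abs(L @ e) / abs(L @ u))                  # relative output error
    errs_u.append(eu)
    errs_s.append(es)

plt.semilogy(list(Ns), errs_u, 'o-', label='energy norm')
plt.semilogy(list(Ns), errs_s, 's-', label='output')
plt.xlabel('N'); plt.legend(); plt.show()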
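For the cost function $C(Bi) = Bi + {T_{root}}(Bi)$, any simple minimizer suffices; since each online query is cheap, a brute-force scan over a log grid of Biot numbers is already adequate. In this sketch `rb_output` stands in for the online evaluation of ${T_{root}}_N$ as a function of $Bi$; with the synthetic data above it is only a placeholder one-parameter model.

Bi_grid = np.logspace(np.log10(0.01), np.log10(1.0), 200)
cost = np.array([Bi + rb_output(Bi) for Bi in Bi_grid])   # C(Bi) on the grid
i_opt = int(np.argmin(cost))
print(f"optimal Bi ~ {Bi_grid[i_opt]:.4f}, C(Bi) = {cost[i_opt]:.4f}")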
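For the POD question, one workable route is the method of snapshots: form the snapshot correlation matrix in the $X$-inner product, diagonalize it, and truncate where the RIC reaches 99% of the total energy. A sketch reusing `fe_solve`, `X`, and `rng` from the blocks above, with a one-parameter training set for brevity (the problem set uses $P = 2$):

ntrain = 100
mus = np.exp(rng.uniform(np.log(0.1), np.log(10), ntrain))   # log random sample
U = np.column_stack([fe_solve(mu) for mu in mus])

C = U.T @ (X @ U) / ntrain              # correlation matrix C_ij = (u_i, u_j)_X / ntrain
lam, V = np.linalg.eigh(C)
lam, V = lam[::-1], V[:, ::-1]          # eigenvalues in decreasing order

ric = np.cumsum(lam) / np.sum(lam)      # relative information content
N_pod = int(np.searchsorted(ric, 0.99)) + 1
print(f"RIC reaches 99% at N = {N_pod}")

# POD basis, X-orthonormal by construction: xi_n = U v_n / sqrt(ntrain * lam_n)
Zpod = U @ V[:, :N_pod] / np.sqrt(ntrain * lam[:N_pod])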