From 54c27c2d97417dfa4a1fa359171096e04fc55f70 Mon Sep 17 00:00:00 2001 From: QBatista Date: Sat, 4 Apr 2020 15:38:05 +0900 Subject: [PATCH 1/9] FEAT: Add `amf.py` --- quantecon/amf.py | 362 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 362 insertions(+) create mode 100644 quantecon/amf.py diff --git a/quantecon/amf.py b/quantecon/amf.py new file mode 100644 index 000000000..68b0aa5f9 --- /dev/null +++ b/quantecon/amf.py @@ -0,0 +1,362 @@ +""" +A module for working with additive and multiplicative functionals. + +""" + +import numpy as np +import scipy.linalg as la +import quantecon as qe +from collections import namedtuple +import warnings + + +ad_lss_var = namedtuple('additive_decomp', 'ν H g') +md_lss_var = namedtuple('multiplicative_decomp', 'ν_tilde H g') + + +class AMF_LSS_VAR: + """ + A class for transforming an additive (multiplicative) functional into a + QuantEcon linear state space system. It uses the first-order VAR + representation to build the LSS representation using the + `LinearStateSpace` class. + + First-order VAR representation: + + .. math:: + + x_{t+1} = Ax_{t} + Bz_{t+1} + + y_{t+1}-y_{t} = ν + Dx_{t} + Fz_{t+1} + + Linear State Space (LSS) representation: + + .. math:: + + \hat{x}_{t+1} = \hat{A}\hat{x}_{t}+\hat{B}z_{t+1} + + \hat{y}_{t} = \hat{D}\hat{x}_{t} + + Parameters + ---------- + A : array_like(float, ndim=2) + Part of the first-order vector autoregression equation. It should be an + `nx x nx` matrix. + + B : array_like(float, ndim=2) + Part of the first-order vector autoregression equation. It should be an + `nx x nk` matrix. + + D : array_like(float, dim=2) + Part of the nonstationary random process. It should be an `ny x nx` + matrix. + + F : array_like or None, optional(default=None) + Part of the nonstationary random process. If array_like, it should be + an `ny x nk` matrix. + + ν : array_like or float or None, optional(default=None) + Part of the nonstationary random process. If array_like, it should be + an `ny x 1` matrix. + + Attributes + ---------- + A, B, D, F, ν : See Parameters. 
+ + additive_decomp : namedtuple + A namedtuple containing the following items: + :: + + "ν" : unconditional mean difference in Y + "H" : coefficient for the (linear) martingale component + "g" : coefficient for the stationary component g(x) + + multiplicative_decomp : namedtuple + A namedtuple containing the following items: + :: + + "ν_tilde" : eigenvalue + "H" : coefficient for the (linear) martingale component + "g" : coefficient for the stationary component g(x) + + """ + def __init__(self, A, B, D, F=None, ν=None): + # = Set Inputs = # + self.A = np.asarray(A) + self.B = np.asarray(B) + self._nx, self._nk = self.B.shape + self.D = np.asarray(D) + self._ny = self.D.shape[0] + + if hasattr(F, '__getitem__'): + self.F = np.asarray(F) # F is array_like + else: + self.F = np.zeros((self._nk, self._nk)) + + if hasattr(ν, '__getitem__') or isinstance(ν, float): + self.ν = np.asarray(ν) # ν is array_like or float + else: + self.ν = np.zeros((self._ny, 1)) + + # = Check dimensions = # + self._attr_dims_check() + + # = Check shape = # + self._attr_shape_check() + + # = Compute Additive Decomposition = # + eye = np.identity(self._nx) + A_res = la.solve(eye - self.A, eye) + g = self.D @ A_res + H = F + D @ A_res @ self.B + + self.additive_decomp = ad_lss_var(ν, H, g) + + # = Compute Multiplicative Decomposition = # + ν_tilde = ν + (.5) * np.expand_dims(np.diag(H @ H.T), 1) + self.multiplicative_decomp = md_lss_var(ν_tilde, H, g) + + # = Construct LSS = # + nx0c = np.zeros((self._nx, 1)) + nx0r = np.zeros(self._nx) + nx1 = np.ones(self._nx) + nk0 = np.zeros(self._nk) + ny0c = np.zeros((self._ny, 1)) + ny0r = np.zeros(self._ny) + ny1m = np.eye(self._ny) + ny0m = np.zeros((self._ny, self._ny)) + nyx0m = np.zeros_like(self.D) + + x0 = self._construct_x0(nx0r, ny0r) + A_bar = self._construct_A_bar(x0, nx0c, nyx0m, ny0c, ny1m, ny0m) + B_bar = self._construct_B_bar(nk0, H) + G_Bar = self._construct_G_bar(nx0c, self._nx, nyx0m, ny0c, ny1m, ny0m, + g) + H_bar = self._construct_H_bar(self._nx, self._ny, self._nk) + Sigma_0 = self._construct_Sigma_0(x0) + + self._lss = qe.LinearStateSpace(A_bar, B_bar, G_Bar, H_bar, mu_0=x0, + Sigma_0=Sigma_0) + + def _construct_x0(self, nx0r, ny0r): + x0 = np.hstack([1, 0, nx0r, ny0r, ny0r]) + + return x0 + + def _construct_A_bar(self, x0, nx0c, nyx0m, ny0c, ny1m, ny0m): + # Build A matrix for LSS + # Order of states is: [1, t, x_{t}, y_{t}, m_{t}] + + # Transition for 1 + A1 = x0.copy() + + # Transition for t + A2 = x0.copy() + A2[1] = 1. 
+ + # Transition for x_{t+1} + A3 = np.hstack([nx0c, nx0c, self.A, nyx0m.T, nyx0m.T]) + + # Transition for y_{t+1} + A4 = np.hstack([self.ν, ny0c, self.D, ny1m, ny0m]) + + # Transition for m_{t+1} + A5 = np.hstack([ny0c, ny0c, nyx0m, ny0m, ny1m]) + + A_bar = np.vstack([A1, A2, A3, A4, A5]) + + return A_bar + + def _construct_B_bar(self, nk0, H): + # Build B matrix for LSS + B_bar = np.vstack([nk0, nk0, self.B, self.F, H]) + + return B_bar + + def _construct_G_bar(self, nx0c, nx, nyx0m, ny0c, ny1m, ny0m, g): + # Build G matrix for LSS + # Order of observation is: [x_{t}, y_{t}, m_{t}, s_{t}, tau_{t}] + + # Selector for x_{t} + G1 = np.hstack([nx0c, nx0c, np.eye(nx), nyx0m.T, nyx0m.T]) + + # Selector for y_{t} + G2 = np.hstack([ny0c, ny0c, nyx0m, ny1m, ny0m]) + + # Selector for martingale m_{t} + G3 = np.hstack([ny0c, ny0c, nyx0m, ny0m, ny1m]) + + # Selector for stationary s_{t} + G4 = np.hstack([ny0c, ny0c, -g, ny0m, ny0m]) + + # Selector for trend tau_{t} + G5 = np.hstack([ny0c, self.ν, nyx0m, ny0m, ny0m]) + + G_bar = np.vstack([G1, G2, G3, G4, G5]) + + return G_bar + + def _construct_H_bar(self, nx, ny, nk): + # Build H matrix for LSS + H_bar = np.zeros((2 + nx + 2 * ny, nk)) + + return H_bar + + def _construct_Sigma_0(self, x0): + Sigma_0 = np.zeros((len(x0), len(x0))) + + return Sigma_0 + + def _attr_dims_check(self): + """Check the dimensions of attributes.""" + + inputs = {'A': self.A, 'B': self.B, 'D': self.D, 'F': self.F, + 'ν': self.ν} + + for input_name, input_val in inputs.items(): + if input_val.ndim != 2: + raise ValueError(input_name + ' must have 2 dimensions.') + + def _attr_shape_check(self): + """Check the shape of attributes.""" + + same_dim_pairs = {'first': (0, {'A and B': [self.A, self.B], + 'D and F': [self.D, self.F], + 'D and ν': [self.D, self.ν]}), + 'second': (1, {'A and D': [self.A, self.D], + 'B and F': [self.B, self.F]})} + + for dim_name, (dim_idx, pairs) in same_dim_pairs.items(): + for pair_name, (e0, e1) in pairs.items(): + if e0.shape[dim_idx] != e1.shape[dim_idx]: + raise ValueError('The ' + dim_name + ' dimensions of ' + + pair_name + ' must match.') + + if self.A.shape[0] != self.A.shape[1]: + raise ValueError('A (shape: %s) must be a square matrix.' % + (self.A.shape, )) + + # F.shape[0] == ν.shape[0] holds by transitivity + # Same for D.shape[1] == B.shape[0] == A.shape[0] + + def loglikelihood_path(self, x, y): + """ + Computes the log-likelihood path associated with a path of additive + functionals `x` and `y` and assuming standard normal shocks. + + Parameters + ---------- + x : ndarray(float, ndim=1) + A path of observations for the state variable. + + y : ndarray(float, ndim=1) + A path of observations for the random process + + Returns + -------- + llh : ndarray(float, ndim=1) + An array containing the loglikelihood path. + + """ + + k, T = y.shape + FF = self.F @ self.F.T + FF_inv = la.inv(FF) + temp = y[:, 1:] - y[:, :-1] - self.D @ x[:, :-1] + obs = temp * FF_inv * temp + obssum = np.cumsum(obs) + scalar = (np.log(la.det(FF)) + k * np.log(2 * np.pi)) * np.arange(1, T) + + llh = (-0.5) * (obssum + scalar) + + return llh + + +def pth_order_to_stacked_1st_order(ζ_hat, A_hats): + """ + Construct the first order stacked representation of a VAR from the pth + order representation. + + Parameters + ---------- + ζ_hat : ndarray(float, ndim=1) + Vector of constants of the pth order VAR. + + A_hats : tuple + Sequence of `ρ` matrices of shape `n x n` of lagged coefficients of + the pth order VAR. 
+ + Returns + ---------- + ζ : ndarray(float, ndim=1) + Vector of constants of the 1st order stacked VAR. + + A : ndarray(float, ndim=2) + Matrix of coefficients of the 1st order stacked VAR. + + """ + ρ = len(A_hats) + n = A_hats[0].shape[0] + + A = np.zeros((n * ρ, n * ρ)) + A[:n, :] = np.hstack(A_hats) + A[n:, :n*(ρ-1)] = np.eye(n * (ρ - 1)) + + ζ = np.zeros(n * ρ) + ζ[:n] = np.eye(n) @ ζ_hat + + return ζ, A + + +def compute_BQ_restricted_B_0(A_hats, Ω_hat): + """ + Compute the `B_0` matrix for `AMF_LSS_VAR` using the Blanchard and Quah + method to impose long-run restrictions. + + Parameters + ---------- + A_hats : tuple + Sequence of `ρ` matrices of shape `n x n` of lagged coefficients of + the pth order VAR. + + Ω_hat : ndarray(float, ndim=2) + Covariance matrix of the error term. + + Returns + ---------- + B_0 : ndarray(float, ndim=2) + Matrix satisfying :math:`\hat{\Omega}=B_{0}B_{0}^{\intercal}`, where + :math:`B_{0}` is identified using the Blanchard and Quah method. + + References + ---------- + .. [1] Lars Peter Hansen and Thomas J. Sargent. Risk, Uncertainty, and + Value. Princeton, New Jersey: Princeton University Press., 2018. + + """ + ρ = len(A_hats) + + # Step 1: Compute the spectral density of V_{t} at frequency zero + def A_hat(z): + return np.eye(ρ) - sum([A_hats[i] * z ** i for i in range(ρ)]) + A_hat_1 = A_hat(1) + + accuracy_loss = np.log10(np.linalg.cond(A_hat_1)).round().astype(int) + if accuracy_loss >= 8: + warnings.warn('The `A_hat(1)` matrix is ill-conditioned. ' + + ' Approximately ' + accuracy_loss + ' digits may be' + + ' lost due to matrix inversion.') + + A_hat_1_inv = np.linalg.inv(A_hat_1) + R = A_hat_1_inv @ Ω_hat @ A_hat_1_inv.T + + # Step 2: Compute the Cholesky decomposition of R + R_chol = np.linalg.cholesky(R) + + # Step 3: Compute B_0 + B_0 = A_hat_1 @ R_chol + + if not np.abs(B_0 @ B_0.T - Ω_hat).max() < 1e-10: + raise ValueError('The process of identifying `B_0` failed.') + + return B_0 From a9d109d796cd842bcc41c6ee3bc0452f3a9f6be4 Mon Sep 17 00:00:00 2001 From: QBatista Date: Sat, 4 Apr 2020 15:40:05 +0900 Subject: [PATCH 2/9] TEST: Add tests for `amf.py` --- quantecon/tests/test_amf.py | 245 ++++++++++++++++++++++++++++++++++++ 1 file changed, 245 insertions(+) create mode 100644 quantecon/tests/test_amf.py diff --git a/quantecon/tests/test_amf.py b/quantecon/tests/test_amf.py new file mode 100644 index 000000000..f990e342f --- /dev/null +++ b/quantecon/tests/test_amf.py @@ -0,0 +1,245 @@ +""" +Tests for amf.py + +""" + +import numpy as np +from numpy.testing import assert_array_equal, assert_allclose +from quantecon import (AMF_LSS_VAR, pth_order_to_stacked_1st_order, + compute_BQ_restricted_B_0) +from scipy.stats import multivariate_normal as mvn +from nose.tools import assert_raises + + +class TestAMFLSSVAR: + def setUp(self): + ϕ_1, ϕ_2, ϕ_3, ϕ_4 = 0.5, -0.2, 0, 0.5 + σ = 0.01 + self.ν = np.array([[0.01]]) # Growth rate + + # A matrix should be n x n + self.A = np.array([[ϕ_1, ϕ_2, ϕ_3, ϕ_4], + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]]) + + # B matrix should be n x k + self.B = np.array([[σ, 0, 0, 0]]).T + + self.D = np.array([[1, 0, 0, 0]]) @ self.A + self.F = np.array([[1, 0, 0, 0]]) @ self.B + + self.amf = AMF_LSS_VAR(self.A, self.B, self.D, self.F, self.ν) + + def test__construct_x0(self): + ny0r = np.ones(2) + nx0r = 2. 
* np.ones(3) + x0 = self.amf._construct_x0(nx0r, ny0r) + + x0_sol = np.array([1., 0., 2., 2., 2., 1., 1., 1., 1.]) + + assert_array_equal(x0, x0_sol) + + def test__construct_A_bar(self): + x0 = np.ones(2 + self.amf._nx + 2 * self.amf._ny) + nx0c = 3 * np.ones((self.amf._nx, 1)) + nyx0m = 4 * np.ones_like(self.D) + ny0c = 5 * np.ones((self.amf._ny, 1)) + ny1m = 6 * np.eye(self.amf._ny) + ny0m = 7 * np.ones((self.amf._ny, self.amf._ny)) + + A_bar = self.amf._construct_A_bar(x0, nx0c, nyx0m, ny0c, ny1m, ny0m) + + A1_2_sol = np.array([[1., 1., 1., 1., 1., 1., 1., 1.], + [1., 1., 1., 1., 1., 1., 1., 1.]]) + + A3_sol = np.hstack([[[3.], [3.], [3.], [3.]], + [[3.], [3.], [3.], [3.]], + self.A, + [[4.], [4.], [4.], [4.]], + [[4.], [4.], [4.], [4.]]]) + + A4_sol = np.hstack([self.ν, [[5.]], self.D, [[6.]], [[7.]]]) + + A5_sol = np.array([5., 5., 4., 4., 4., 4., 7., 6.]) + + A_bar_sol = np.vstack([A1_2_sol, A3_sol, A4_sol, A5_sol]) + + assert_array_equal(A_bar, A_bar_sol) + + def test__construct_B_bar(self): + nk0 = np.ones(self.amf._nk) + H = 2 * np.ones((self.amf._nk, self.amf._nk)) + B_bar = self.amf._construct_B_bar(nk0, H) + + B_bar_sol = np.vstack([nk0, nk0, self.B, self.F, H]) + + assert_array_equal(B_bar, B_bar_sol) + + def test__construct_G_bar(self): + nx0c = np.ones((self.amf._nx, 1)) + nyx0m = 2 * np.ones_like(self.D) + ny0c = 3 * np.ones((self.amf._ny, 1)) + ny1m = 4 * np.eye(self.amf._ny) + ny0m = 5 * np.ones((self.amf._ny, self.amf._ny)) + g = self.amf.additive_decomp[2] + + G_bar = self.amf._construct_G_bar(nx0c, self.amf._nx, nyx0m, ny0c, + ny1m, ny0m, g) + + G_1_2_3_sol = np.array([[1., 1., 1., 0., 0., 0., 2., 2.], + [1., 1., 0., 1., 0., 0., 2., 2.], + [1., 1., 0., 0., 1., 0., 2., 2.], + [1., 1., 0., 0., 0., 1., 2., 2.], + [3., 3., 2., 2., 2., 2., 4., 5.], + [3., 3., 2., 2., 2., 2., 5., 4.]]) + + G_4_sol = np.hstack([[[3.]], [[3.]], -g, [[5.]], [[5.]]]) + + G_5_sol = np.hstack([[[3.]], self.ν, [[2., 2., 2., 2.]], [[5.]], + [[5.]]]) + + G_bar_sol = np.vstack([G_1_2_3_sol, G_4_sol, G_5_sol]) + + assert_array_equal(G_bar, G_bar_sol) + + def test__construct_H_bar(self): + nx, ny, nk = 2, 3, 5 + H_bar = self.amf._construct_H_bar(nx, ny, nk) + + H_bar_sol = np.array([[0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.]]) + + assert_array_equal(H_bar, H_bar_sol) + + def test__construct_Sigma_0(self): + x0 = np.array([1., 2., 3.]) + Sigma_0 = self.amf._construct_Sigma_0(x0) + Sigma_0_sol = np.array([[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]]) + + assert_array_equal(Sigma_0, Sigma_0_sol) + + def test_invalid_dimensions(self): + inputs = (self.A, self.B, self.D, self.F, self.ν) + inputs_passed = list(inputs) + invalid_inputs = [[], np.array([]), ()] + + for invalid_input in invalid_inputs: + for i in range(len(inputs)): + inputs_passed[i] = invalid_input # Set input i to be invalid + with assert_raises(ValueError): + AMF_LSS_VAR(*inputs_passed) + + inputs_passed[i] = inputs[i] # Restore original input + + def test_invalid_shape(self): + inputs = (self.A, self.B, self.D, self.F, self.ν) + inputs_passed = list(inputs) + invalid_input = np.eye(10) + + for i in range(len(inputs)): + inputs_passed[i] = invalid_input # Set input i to be invalid + with assert_raises(ValueError): + AMF_LSS_VAR(*inputs_passed) + + inputs_passed[i] = inputs[i] # Restore original input + + def test_non_square_A(self): + A = 
np.zeros((1, 3)) + B = np.zeros((1, 4)) + D = np.zeros((2, 3)) + F = np.zeros((2, 4)) + ν = np.zeros((2, 1)) + + with assert_raises(ValueError): + AMF_LSS_VAR(A, B, D, F, ν) + + def test_loglikelihood(self): + x = np.random.rand(4, 10) * 0.005 + y = np.random.rand(1, 10) * 0.005 + + temp = y[:, 1:] - y[:, :-1] - self.D @ x[:, :-1] + + llh = self.amf.loglikelihood_path(x, y) + + cov = self.F @ self.F.T + + llh_sol_scipy = np.cumsum(np.log([mvn.pdf(obs, mean=0, cov=cov) + for obs in temp])) + + assert_allclose(llh, llh_sol_scipy) + + +def test_pth_order_to_stacked_1st_order(): + # First test + n = 2 + p = 5 + + ζ_hat = np.array([1., 2.]) + + A_hats = ([(i + 1) * np.eye(n) for i in range(p)]) + + ζ, A = pth_order_to_stacked_1st_order(ζ_hat, A_hats) + + A_sol = np.array([[1., 0., 2., 0., 3., 0., 4., 0., 5., 0.], + [0., 1., 0., 2., 0., 3., 0., 4., 0., 5.], + [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.], + [0., 1., 0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 1., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 1., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 1., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 1., 0., 0.]]) + + ζ_sol = np.array([1., 2., 0., 0., 0., 0., 0., 0., 0., 0.]) + + assert_array_equal(ζ, ζ_sol) + assert_array_equal(A, A_sol) + + # Second test + A_hats = (np.array([[.1, -.07, .03], [.4, .01, -.05], [.01, -.1, .6]]), + np.array([[-.8, .2, .02], [-.3, -.07, 0.05], [-.02, .1, -.09]])) + + ζ_hat = np.array([0.01, 0.02, 0.03]) + + ζ, A = pth_order_to_stacked_1st_order(ζ_hat, A_hats) + + ζ_sol = np.array([0.01, 0.02, 0.03, 0., 0., 0.]) + + A_sol = np.array([[.1, -.07, .03, -.8, .2, .02], + [.4, .01, -.05, -.3, -.07, 0.05], + [.01, -.1, .6, -.02, .1, -.09], + [1., 0., 0., 0., 0., 0.], + [0., 1., 0., 0., 0., 0.], + [0., 0., 1., 0., 0., 0.]]) + + assert_array_equal(ζ, ζ_sol) + assert_array_equal(A, A_sol) + + +def test_compute_BQ_restricted_B_0(): + A = np.array([[0.9, -0.2], + [0.3, 0.6]]) + + Ω_hat = np.array([[0.001, -0.0005], + [-0.0005, 0.001]]) + + A_hats = (A, ) + + B_0 = compute_BQ_restricted_B_0(A_hats, Ω_hat) + + B_0_sol = np.array([[-0.02192645, 0.02278664], + [0.03069703, 0.00759555]]) + + assert_allclose(B_0, B_0_sol, rtol=1e-6) From 65f8880bf56cdc2c50bb428bb89cf1c7cf74e649 Mon Sep 17 00:00:00 2001 From: QBatista Date: Sat, 4 Apr 2020 15:41:06 +0900 Subject: [PATCH 3/9] Update `__init__.py` file --- quantecon/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/quantecon/__init__.py b/quantecon/__init__.py index 090e3b85f..955d364c1 100644 --- a/quantecon/__init__.py +++ b/quantecon/__init__.py @@ -18,6 +18,8 @@ from . 
import optimize #-Objects-# +from .amf import (AMF_LSS_VAR, pth_order_to_stacked_1st_order, + compute_BQ_restricted_B_0) from .compute_fp import compute_fixed_point from .discrete_rv import DiscreteRV from .dle import DLE From 8621fe7727ee4f6671681239cc00d1e6f38fd82f Mon Sep 17 00:00:00 2001 From: QBatista Date: Sat, 4 Apr 2020 16:34:54 +0900 Subject: [PATCH 4/9] Minor fixes --- quantecon/amf.py | 50 ++++++++++++++++++++++++------------------------ 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/quantecon/amf.py b/quantecon/amf.py index 68b0aa5f9..130bb626f 100644 --- a/quantecon/amf.py +++ b/quantecon/amf.py @@ -82,21 +82,21 @@ class AMF_LSS_VAR: """ def __init__(self, A, B, D, F=None, ν=None): # = Set Inputs = # - self.A = np.asarray(A) - self.B = np.asarray(B) - self._nx, self._nk = self.B.shape - self.D = np.asarray(D) - self._ny = self.D.shape[0] + self.A = np.atleast_2d(A) + self.B = np.atleast_2d(B) + self.nx, self.nk = self.B.shape + self.D = np.atleast_2d(D) + self.ny = self.D.shape[0] if hasattr(F, '__getitem__'): - self.F = np.asarray(F) # F is array_like + self.F = np.atleast_2d(F) # F is array_like else: - self.F = np.zeros((self._nk, self._nk)) + self.F = np.zeros((self.nk, self.nk)) if hasattr(ν, '__getitem__') or isinstance(ν, float): - self.ν = np.asarray(ν) # ν is array_like or float + self.ν = np.atleast_2d(ν) # ν is array_like or float else: - self.ν = np.zeros((self._ny, 1)) + self.ν = np.zeros((self.ny, 1)) # = Check dimensions = # self._attr_dims_check() @@ -105,38 +105,38 @@ def __init__(self, A, B, D, F=None, ν=None): self._attr_shape_check() # = Compute Additive Decomposition = # - eye = np.identity(self._nx) + eye = np.identity(self.nx) A_res = la.solve(eye - self.A, eye) g = self.D @ A_res - H = F + D @ A_res @ self.B + H = self.F + self.D @ A_res @ self.B - self.additive_decomp = ad_lss_var(ν, H, g) + self.additive_decomp = ad_lss_var(self.ν, H, g) # = Compute Multiplicative Decomposition = # - ν_tilde = ν + (.5) * np.expand_dims(np.diag(H @ H.T), 1) + ν_tilde = self.ν + (.5) * np.expand_dims(np.diag(H @ H.T), 1) self.multiplicative_decomp = md_lss_var(ν_tilde, H, g) # = Construct LSS = # - nx0c = np.zeros((self._nx, 1)) - nx0r = np.zeros(self._nx) - nx1 = np.ones(self._nx) - nk0 = np.zeros(self._nk) - ny0c = np.zeros((self._ny, 1)) - ny0r = np.zeros(self._ny) - ny1m = np.eye(self._ny) - ny0m = np.zeros((self._ny, self._ny)) + nx0c = np.zeros((self.nx, 1)) + nx0r = np.zeros(self.nx) + nx1 = np.ones(self.nx) + nk0 = np.zeros(self.nk) + ny0c = np.zeros((self.ny, 1)) + ny0r = np.zeros(self.ny) + ny1m = np.eye(self.ny) + ny0m = np.zeros((self.ny, self.ny)) nyx0m = np.zeros_like(self.D) x0 = self._construct_x0(nx0r, ny0r) A_bar = self._construct_A_bar(x0, nx0c, nyx0m, ny0c, ny1m, ny0m) B_bar = self._construct_B_bar(nk0, H) - G_Bar = self._construct_G_bar(nx0c, self._nx, nyx0m, ny0c, ny1m, ny0m, + G_Bar = self._construct_G_bar(nx0c, self.nx, nyx0m, ny0c, ny1m, ny0m, g) - H_bar = self._construct_H_bar(self._nx, self._ny, self._nk) + H_bar = self._construct_H_bar(self.nx, self.ny, self.nk) Sigma_0 = self._construct_Sigma_0(x0) - self._lss = qe.LinearStateSpace(A_bar, B_bar, G_Bar, H_bar, mu_0=x0, - Sigma_0=Sigma_0) + self.lss = qe.LinearStateSpace(A_bar, B_bar, G_Bar, H_bar, mu_0=x0, + Sigma_0=Sigma_0) def _construct_x0(self, nx0r, ny0r): x0 = np.hstack([1, 0, nx0r, ny0r, ny0r]) From a567e23a102efa43cf055917b31404d21639cb81 Mon Sep 17 00:00:00 2001 From: QBatista Date: Sat, 4 Apr 2020 17:16:35 +0900 Subject: [PATCH 5/9] TEST: Minor fix --- 
quantecon/tests/test_amf.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/quantecon/tests/test_amf.py b/quantecon/tests/test_amf.py index f990e342f..31700ab2a 100644 --- a/quantecon/tests/test_amf.py +++ b/quantecon/tests/test_amf.py @@ -41,12 +41,12 @@ def test__construct_x0(self): assert_array_equal(x0, x0_sol) def test__construct_A_bar(self): - x0 = np.ones(2 + self.amf._nx + 2 * self.amf._ny) - nx0c = 3 * np.ones((self.amf._nx, 1)) + x0 = np.ones(2 + self.amf.nx + 2 * self.amf.ny) + nx0c = 3 * np.ones((self.amf.nx, 1)) nyx0m = 4 * np.ones_like(self.D) - ny0c = 5 * np.ones((self.amf._ny, 1)) - ny1m = 6 * np.eye(self.amf._ny) - ny0m = 7 * np.ones((self.amf._ny, self.amf._ny)) + ny0c = 5 * np.ones((self.amf.ny, 1)) + ny1m = 6 * np.eye(self.amf.ny) + ny0m = 7 * np.ones((self.amf.ny, self.amf.ny)) A_bar = self.amf._construct_A_bar(x0, nx0c, nyx0m, ny0c, ny1m, ny0m) @@ -68,8 +68,8 @@ def test__construct_A_bar(self): assert_array_equal(A_bar, A_bar_sol) def test__construct_B_bar(self): - nk0 = np.ones(self.amf._nk) - H = 2 * np.ones((self.amf._nk, self.amf._nk)) + nk0 = np.ones(self.amf.nk) + H = 2 * np.ones((self.amf.nk, self.amf.nk)) B_bar = self.amf._construct_B_bar(nk0, H) B_bar_sol = np.vstack([nk0, nk0, self.B, self.F, H]) @@ -77,14 +77,14 @@ def test__construct_B_bar(self): assert_array_equal(B_bar, B_bar_sol) def test__construct_G_bar(self): - nx0c = np.ones((self.amf._nx, 1)) + nx0c = np.ones((self.amf.nx, 1)) nyx0m = 2 * np.ones_like(self.D) - ny0c = 3 * np.ones((self.amf._ny, 1)) - ny1m = 4 * np.eye(self.amf._ny) - ny0m = 5 * np.ones((self.amf._ny, self.amf._ny)) + ny0c = 3 * np.ones((self.amf.ny, 1)) + ny1m = 4 * np.eye(self.amf.ny) + ny0m = 5 * np.ones((self.amf.ny, self.amf.ny)) g = self.amf.additive_decomp[2] - G_bar = self.amf._construct_G_bar(nx0c, self.amf._nx, nyx0m, ny0c, + G_bar = self.amf._construct_G_bar(nx0c, self.amf.nx, nyx0m, ny0c, ny1m, ny0m, g) G_1_2_3_sol = np.array([[1., 1., 1., 0., 0., 0., 2., 2.], From b8ebbb84f266cb7956b4c4f59dca7892b5355932 Mon Sep 17 00:00:00 2001 From: QBatista Date: Thu, 9 Apr 2020 15:04:37 +0900 Subject: [PATCH 6/9] DOC: Improve documentation --- quantecon/__init__.py | 3 +- quantecon/amf.py | 156 +++++++++++++----------------------- quantecon/tests/test_amf.py | 68 +--------------- 3 files changed, 57 insertions(+), 170 deletions(-) diff --git a/quantecon/__init__.py b/quantecon/__init__.py index 955d364c1..e9d5625b1 100644 --- a/quantecon/__init__.py +++ b/quantecon/__init__.py @@ -18,8 +18,7 @@ from . import optimize #-Objects-# -from .amf import (AMF_LSS_VAR, pth_order_to_stacked_1st_order, - compute_BQ_restricted_B_0) +from .amf import AMF_LSS_VAR from .compute_fp import compute_fixed_point from .discrete_rv import DiscreteRV from .dle import DLE diff --git a/quantecon/amf.py b/quantecon/amf.py index 130bb626f..d7bd31ded 100644 --- a/quantecon/amf.py +++ b/quantecon/amf.py @@ -61,7 +61,10 @@ class AMF_LSS_VAR: Attributes ---------- - A, B, D, F, ν : See Parameters. + A, B, D, F, ν, nx, nk, ny : See Parameters. + + lss : Instance of `LinearStateSpace`. + LSS representation of the additive (multiplicative) functional. 
additive_decomp : namedtuple A namedtuple containing the following items: @@ -79,6 +82,42 @@ class AMF_LSS_VAR: "H" : coefficient for the (linear) martingale component "g" : coefficient for the stationary component g(x) + Examples + ---------- + Consider the following example: + + >>> ϕ_1, ϕ_2, ϕ_3, ϕ_4 = 0.5, -0.2, 0, 0.5 + >>> σ = 0.01 + >>> ν = 0.01 # Growth rate + >>> A = np.array([[ϕ_1, ϕ_2, ϕ_3, ϕ_4], + ... [ 1, 0, 0, 0], + ... [ 0, 1, 0, 0], + ... [ 0, 0, 1, 0]]) + >>> B = np.array([[σ, 0, 0, 0]]).T + >>> D = np.array([[1, 0, 0, 0]]) @ A + >>> F = np.array([[1, 0, 0, 0]]) @ B + >>> amf = qe.AMF_LSS_VAR(A, B, D, F, ν=ν) + + The additive decomposition can be accessed by: + + >>> amf.multiplicative_decomp + additive_decomp(ν=array([[0.01]]), H=array([[0.05]]), + g=array([[4. , 1.5, 2.5, 2.5]])) + + The multiplicative decomposition can be accessed by: + + >>> amf.multiplicative_decomp + multiplicative_decomp(ν_tilde=array([[0.01125]]), H=array([[0.05]]), + g=array([[4. , 1.5, 2.5, 2.5]])) + + References + ---------- + .. [1] Lars Peter Hansen and Thomas J Sargent. Robustness. Princeton + university press, 2008. + + .. [2] Lars Peter Hansen and José A Scheinkman. Long-term risk: An operator + approach. Econometrica, 77(1):177–234, 2009. + """ def __init__(self, A, B, D, F=None, ν=None): # = Set Inputs = # @@ -130,23 +169,25 @@ def __init__(self, A, B, D, F=None, ν=None): x0 = self._construct_x0(nx0r, ny0r) A_bar = self._construct_A_bar(x0, nx0c, nyx0m, ny0c, ny1m, ny0m) B_bar = self._construct_B_bar(nk0, H) - G_Bar = self._construct_G_bar(nx0c, self.nx, nyx0m, ny0c, ny1m, ny0m, + G_bar = self._construct_G_bar(nx0c, self.nx, nyx0m, ny0c, ny1m, ny0m, g) H_bar = self._construct_H_bar(self.nx, self.ny, self.nk) Sigma_0 = self._construct_Sigma_0(x0) - self.lss = qe.LinearStateSpace(A_bar, B_bar, G_Bar, H_bar, mu_0=x0, + self.lss = qe.LinearStateSpace(A_bar, B_bar, G_bar, H_bar, mu_0=x0, Sigma_0=Sigma_0) def _construct_x0(self, nx0r, ny0r): + "Construct initial state x0 for LSS instance." + x0 = np.hstack([1, 0, nx0r, ny0r, ny0r]) return x0 def _construct_A_bar(self, x0, nx0c, nyx0m, ny0c, ny1m, ny0m): - # Build A matrix for LSS - # Order of states is: [1, t, x_{t}, y_{t}, m_{t}] + "Construct A matrix for LSS instance." + # Order of states is: [1, t, x_{t}, y_{t}, m_{t}] # Transition for 1 A1 = x0.copy() @@ -168,15 +209,15 @@ def _construct_A_bar(self, x0, nx0c, nyx0m, ny0c, ny1m, ny0m): return A_bar def _construct_B_bar(self, nk0, H): - # Build B matrix for LSS + "Construct B matrix for LSS instance." B_bar = np.vstack([nk0, nk0, self.B, self.F, H]) return B_bar def _construct_G_bar(self, nx0c, nx, nyx0m, ny0c, ny1m, ny0m, g): - # Build G matrix for LSS - # Order of observation is: [x_{t}, y_{t}, m_{t}, s_{t}, tau_{t}] + "Construct G matrix for LSS instance." + # Order of observation is: [x_{t}, y_{t}, m_{t}, s_{t}, tau_{t}] # Selector for x_{t} G1 = np.hstack([nx0c, nx0c, np.eye(nx), nyx0m.T, nyx0m.T]) @@ -197,18 +238,21 @@ def _construct_G_bar(self, nx0c, nx, nyx0m, ny0c, ny1m, ny0m, g): return G_bar def _construct_H_bar(self, nx, ny, nk): - # Build H matrix for LSS + "Construct H matrix for LSS instance." + H_bar = np.zeros((2 + nx + 2 * ny, nk)) return H_bar def _construct_Sigma_0(self, x0): + "Construct initial covariance matrix Sigma_0 for LSS instance." + Sigma_0 = np.zeros((len(x0), len(x0))) return Sigma_0 def _attr_dims_check(self): - """Check the dimensions of attributes.""" + "Check the dimensions of attributes." 
inputs = {'A': self.A, 'B': self.B, 'D': self.D, 'F': self.F, 'ν': self.ν} @@ -218,7 +262,7 @@ def _attr_dims_check(self): raise ValueError(input_name + ' must have 2 dimensions.') def _attr_shape_check(self): - """Check the shape of attributes.""" + "Check the shape of attributes." same_dim_pairs = {'first': (0, {'A and B': [self.A, self.B], 'D and F': [self.D, self.F], @@ -270,93 +314,3 @@ def loglikelihood_path(self, x, y): llh = (-0.5) * (obssum + scalar) return llh - - -def pth_order_to_stacked_1st_order(ζ_hat, A_hats): - """ - Construct the first order stacked representation of a VAR from the pth - order representation. - - Parameters - ---------- - ζ_hat : ndarray(float, ndim=1) - Vector of constants of the pth order VAR. - - A_hats : tuple - Sequence of `ρ` matrices of shape `n x n` of lagged coefficients of - the pth order VAR. - - Returns - ---------- - ζ : ndarray(float, ndim=1) - Vector of constants of the 1st order stacked VAR. - - A : ndarray(float, ndim=2) - Matrix of coefficients of the 1st order stacked VAR. - - """ - ρ = len(A_hats) - n = A_hats[0].shape[0] - - A = np.zeros((n * ρ, n * ρ)) - A[:n, :] = np.hstack(A_hats) - A[n:, :n*(ρ-1)] = np.eye(n * (ρ - 1)) - - ζ = np.zeros(n * ρ) - ζ[:n] = np.eye(n) @ ζ_hat - - return ζ, A - - -def compute_BQ_restricted_B_0(A_hats, Ω_hat): - """ - Compute the `B_0` matrix for `AMF_LSS_VAR` using the Blanchard and Quah - method to impose long-run restrictions. - - Parameters - ---------- - A_hats : tuple - Sequence of `ρ` matrices of shape `n x n` of lagged coefficients of - the pth order VAR. - - Ω_hat : ndarray(float, ndim=2) - Covariance matrix of the error term. - - Returns - ---------- - B_0 : ndarray(float, ndim=2) - Matrix satisfying :math:`\hat{\Omega}=B_{0}B_{0}^{\intercal}`, where - :math:`B_{0}` is identified using the Blanchard and Quah method. - - References - ---------- - .. [1] Lars Peter Hansen and Thomas J. Sargent. Risk, Uncertainty, and - Value. Princeton, New Jersey: Princeton University Press., 2018. - - """ - ρ = len(A_hats) - - # Step 1: Compute the spectral density of V_{t} at frequency zero - def A_hat(z): - return np.eye(ρ) - sum([A_hats[i] * z ** i for i in range(ρ)]) - A_hat_1 = A_hat(1) - - accuracy_loss = np.log10(np.linalg.cond(A_hat_1)).round().astype(int) - if accuracy_loss >= 8: - warnings.warn('The `A_hat(1)` matrix is ill-conditioned. 
' + - ' Approximately ' + accuracy_loss + ' digits may be' + - ' lost due to matrix inversion.') - - A_hat_1_inv = np.linalg.inv(A_hat_1) - R = A_hat_1_inv @ Ω_hat @ A_hat_1_inv.T - - # Step 2: Compute the Cholesky decomposition of R - R_chol = np.linalg.cholesky(R) - - # Step 3: Compute B_0 - B_0 = A_hat_1 @ R_chol - - if not np.abs(B_0 @ B_0.T - Ω_hat).max() < 1e-10: - raise ValueError('The process of identifying `B_0` failed.') - - return B_0 diff --git a/quantecon/tests/test_amf.py b/quantecon/tests/test_amf.py index 31700ab2a..5da02bb2b 100644 --- a/quantecon/tests/test_amf.py +++ b/quantecon/tests/test_amf.py @@ -5,8 +5,7 @@ import numpy as np from numpy.testing import assert_array_equal, assert_allclose -from quantecon import (AMF_LSS_VAR, pth_order_to_stacked_1st_order, - compute_BQ_restricted_B_0) +from quantecon import AMF_LSS_VAR from scipy.stats import multivariate_normal as mvn from nose.tools import assert_raises @@ -178,68 +177,3 @@ def test_loglikelihood(self): for obs in temp])) assert_allclose(llh, llh_sol_scipy) - - -def test_pth_order_to_stacked_1st_order(): - # First test - n = 2 - p = 5 - - ζ_hat = np.array([1., 2.]) - - A_hats = ([(i + 1) * np.eye(n) for i in range(p)]) - - ζ, A = pth_order_to_stacked_1st_order(ζ_hat, A_hats) - - A_sol = np.array([[1., 0., 2., 0., 3., 0., 4., 0., 5., 0.], - [0., 1., 0., 2., 0., 3., 0., 4., 0., 5.], - [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.], - [0., 1., 0., 0., 0., 0., 0., 0., 0., 0.], - [0., 0., 1., 0., 0., 0., 0., 0., 0., 0.], - [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.], - [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.], - [0., 0., 0., 0., 0., 1., 0., 0., 0., 0.], - [0., 0., 0., 0., 0., 0., 1., 0., 0., 0.], - [0., 0., 0., 0., 0., 0., 0., 1., 0., 0.]]) - - ζ_sol = np.array([1., 2., 0., 0., 0., 0., 0., 0., 0., 0.]) - - assert_array_equal(ζ, ζ_sol) - assert_array_equal(A, A_sol) - - # Second test - A_hats = (np.array([[.1, -.07, .03], [.4, .01, -.05], [.01, -.1, .6]]), - np.array([[-.8, .2, .02], [-.3, -.07, 0.05], [-.02, .1, -.09]])) - - ζ_hat = np.array([0.01, 0.02, 0.03]) - - ζ, A = pth_order_to_stacked_1st_order(ζ_hat, A_hats) - - ζ_sol = np.array([0.01, 0.02, 0.03, 0., 0., 0.]) - - A_sol = np.array([[.1, -.07, .03, -.8, .2, .02], - [.4, .01, -.05, -.3, -.07, 0.05], - [.01, -.1, .6, -.02, .1, -.09], - [1., 0., 0., 0., 0., 0.], - [0., 1., 0., 0., 0., 0.], - [0., 0., 1., 0., 0., 0.]]) - - assert_array_equal(ζ, ζ_sol) - assert_array_equal(A, A_sol) - - -def test_compute_BQ_restricted_B_0(): - A = np.array([[0.9, -0.2], - [0.3, 0.6]]) - - Ω_hat = np.array([[0.001, -0.0005], - [-0.0005, 0.001]]) - - A_hats = (A, ) - - B_0 = compute_BQ_restricted_B_0(A_hats, Ω_hat) - - B_0_sol = np.array([[-0.02192645, 0.02278664], - [0.03069703, 0.00759555]]) - - assert_allclose(B_0, B_0_sol, rtol=1e-6) From 6461b5d1ea02ad8cf7f532b2e077d6a757b43c93 Mon Sep 17 00:00:00 2001 From: QBatista Date: Thu, 9 Apr 2020 16:28:26 +0900 Subject: [PATCH 7/9] FIX: Remove `import warnings` --- quantecon/amf.py | 1 - 1 file changed, 1 deletion(-) diff --git a/quantecon/amf.py b/quantecon/amf.py index d7bd31ded..421898907 100644 --- a/quantecon/amf.py +++ b/quantecon/amf.py @@ -7,7 +7,6 @@ import scipy.linalg as la import quantecon as qe from collections import namedtuple -import warnings ad_lss_var = namedtuple('additive_decomp', 'ν H g') From 85154843d059740d7e1fb1a86d92d5653ad9a996 Mon Sep 17 00:00:00 2001 From: QBatista Date: Thu, 9 Apr 2020 17:01:20 +0900 Subject: [PATCH 8/9] TEST: Modify tests for coverage --- quantecon/tests/test_amf.py | 8 +++++++- 1 file changed, 7 
insertions(+), 1 deletion(-) diff --git a/quantecon/tests/test_amf.py b/quantecon/tests/test_amf.py index 5da02bb2b..0b34b4b41 100644 --- a/quantecon/tests/test_amf.py +++ b/quantecon/tests/test_amf.py @@ -144,7 +144,7 @@ def test_invalid_dimensions(self): def test_invalid_shape(self): inputs = (self.A, self.B, self.D, self.F, self.ν) inputs_passed = list(inputs) - invalid_input = np.eye(10) + invalid_input = np.ones((10, 10, 10)) for i in range(len(inputs)): inputs_passed[i] = invalid_input # Set input i to be invalid @@ -163,6 +163,12 @@ def test_non_square_A(self): with assert_raises(ValueError): AMF_LSS_VAR(A, B, D, F, ν) + def test_default_kwargs(self): + amf = AMF_LSS_VAR(self.A, self.B, self.D) + + assert_array_equal(amf.F, np.zeros((amf.nk, amf.nk))) + assert_array_equal(amf.ν, np.zeros((amf.ny, 1))) + def test_loglikelihood(self): x = np.random.rand(4, 10) * 0.005 y = np.random.rand(1, 10) * 0.005 From 49ed148e1f6e41fc401057ecb958f0b99bdfccf3 Mon Sep 17 00:00:00 2001 From: QBatista Date: Sun, 19 Apr 2020 16:54:29 +0900 Subject: [PATCH 9/9] RFC: Change `AMF_LSS_VAR` to `AMF` and simplify some names --- quantecon/__init__.py | 2 +- quantecon/amf.py | 14 +++++++------- quantecon/tests/test_amf.py | 14 +++++++------- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/quantecon/__init__.py b/quantecon/__init__.py index e9d5625b1..a67c78d60 100644 --- a/quantecon/__init__.py +++ b/quantecon/__init__.py @@ -18,7 +18,7 @@ from . import optimize #-Objects-# -from .amf import AMF_LSS_VAR +from .amf import AMF from .compute_fp import compute_fixed_point from .discrete_rv import DiscreteRV from .dle import DLE diff --git a/quantecon/amf.py b/quantecon/amf.py index 421898907..9fb245b25 100644 --- a/quantecon/amf.py +++ b/quantecon/amf.py @@ -9,11 +9,11 @@ from collections import namedtuple -ad_lss_var = namedtuple('additive_decomp', 'ν H g') -md_lss_var = namedtuple('multiplicative_decomp', 'ν_tilde H g') +add_decomp = namedtuple('additive_decomp', 'ν H g') +mult_decomp = namedtuple('multiplicative_decomp', 'ν_tilde H g') -class AMF_LSS_VAR: +class AMF: """ A class for transforming an additive (multiplicative) functional into a QuantEcon linear state space system. It uses the first-order VAR @@ -95,11 +95,11 @@ class AMF_LSS_VAR: >>> B = np.array([[σ, 0, 0, 0]]).T >>> D = np.array([[1, 0, 0, 0]]) @ A >>> F = np.array([[1, 0, 0, 0]]) @ B - >>> amf = qe.AMF_LSS_VAR(A, B, D, F, ν=ν) + >>> amf = qe.AMF(A, B, D, F, ν=ν) The additive decomposition can be accessed by: - >>> amf.multiplicative_decomp + >>> amf.additive_decomp additive_decomp(ν=array([[0.01]]), H=array([[0.05]]), g=array([[4. 
, 1.5, 2.5, 2.5]])) @@ -148,11 +148,11 @@ def __init__(self, A, B, D, F=None, ν=None): g = self.D @ A_res H = self.F + self.D @ A_res @ self.B - self.additive_decomp = ad_lss_var(self.ν, H, g) + self.additive_decomp = add_decomp(self.ν, H, g) # = Compute Multiplicative Decomposition = # ν_tilde = self.ν + (.5) * np.expand_dims(np.diag(H @ H.T), 1) - self.multiplicative_decomp = md_lss_var(ν_tilde, H, g) + self.multiplicative_decomp = mult_decomp(ν_tilde, H, g) # = Construct LSS = # nx0c = np.zeros((self.nx, 1)) diff --git a/quantecon/tests/test_amf.py b/quantecon/tests/test_amf.py index 0b34b4b41..1e016acce 100644 --- a/quantecon/tests/test_amf.py +++ b/quantecon/tests/test_amf.py @@ -5,12 +5,12 @@ import numpy as np from numpy.testing import assert_array_equal, assert_allclose -from quantecon import AMF_LSS_VAR +from quantecon import AMF from scipy.stats import multivariate_normal as mvn from nose.tools import assert_raises -class TestAMFLSSVAR: +class TestAMF: def setUp(self): ϕ_1, ϕ_2, ϕ_3, ϕ_4 = 0.5, -0.2, 0, 0.5 σ = 0.01 @@ -28,7 +28,7 @@ def setUp(self): self.D = np.array([[1, 0, 0, 0]]) @ self.A self.F = np.array([[1, 0, 0, 0]]) @ self.B - self.amf = AMF_LSS_VAR(self.A, self.B, self.D, self.F, self.ν) + self.amf = AMF(self.A, self.B, self.D, self.F, self.ν) def test__construct_x0(self): ny0r = np.ones(2) @@ -137,7 +137,7 @@ def test_invalid_dimensions(self): for i in range(len(inputs)): inputs_passed[i] = invalid_input # Set input i to be invalid with assert_raises(ValueError): - AMF_LSS_VAR(*inputs_passed) + AMF(*inputs_passed) inputs_passed[i] = inputs[i] # Restore original input @@ -149,7 +149,7 @@ def test_invalid_shape(self): for i in range(len(inputs)): inputs_passed[i] = invalid_input # Set input i to be invalid with assert_raises(ValueError): - AMF_LSS_VAR(*inputs_passed) + AMF(*inputs_passed) inputs_passed[i] = inputs[i] # Restore original input @@ -161,10 +161,10 @@ def test_non_square_A(self): ν = np.zeros((2, 1)) with assert_raises(ValueError): - AMF_LSS_VAR(A, B, D, F, ν) + AMF(A, B, D, F, ν) def test_default_kwargs(self): - amf = AMF_LSS_VAR(self.A, self.B, self.D) + amf = AMF(self.A, self.B, self.D) assert_array_equal(amf.F, np.zeros((amf.nk, amf.nk))) assert_array_equal(amf.ν, np.zeros((amf.ny, 1)))