From f8a12259fe7ef58d257b375ee5b4be5db0d45637 Mon Sep 17 00:00:00 2001
From: Ricardo Vieira
Date: Thu, 23 May 2024 12:20:33 +0200
Subject: [PATCH 1/3] Remove unused test models

---
 tests/models.py | 18 ------------------
 1 file changed, 18 deletions(-)

diff --git a/tests/models.py b/tests/models.py
index e1688a69614..58f023af8f9 100644
--- a/tests/models.py
+++ b/tests/models.py
@@ -83,17 +83,6 @@ def simple_init():
     return model, start, step, moments
 
 
-def simple_2model():
-    mu = -2.1
-    tau = 1.3
-    p = 0.4
-    with Model() as model:
-        x = pm.Normal("x", mu, tau=tau, initval=0.1)
-        pm.Deterministic("logx", pt.log(x))
-        pm.Bernoulli("y", p)
-    return model.initial_point(), model
-
-
 def simple_2model_continuous():
     mu = -2.1
     tau = 1.3
@@ -176,13 +165,6 @@ def non_normal(n=2):
     return model.initial_point(), model, (np.tile([0.5], n), None)
 
 
-def exponential_beta(n=2):
-    with pm.Model() as model:
-        pm.Beta("x", 3, 1, size=n, transform=None)
-        pm.Exponential("y", 1, size=n, transform=None)
-    return model.initial_point(), model, None
-
-
 def beta_bernoulli(n=2):
     with pm.Model() as model:
         pm.Beta("x", 3, 1, size=n, transform=None)

From c4a081e576b115165bfed6c9321f5ffb32413d96 Mon Sep 17 00:00:00 2001
From: Ricardo Vieira
Date: Wed, 22 May 2024 18:12:10 +0200
Subject: [PATCH 2/3] Get rid of uses of intX besides `convert_observed_data`

---
 pymc/distributions/multivariate.py       |  6 +++---
 pymc/distributions/timeseries.py         |  8 ++++----
 pymc/testing.py                          |  4 ++--
 tests/distributions/test_multivariate.py | 10 +++++-----
 tests/variational/test_inference.py      |  3 +--
 5 files changed, 15 insertions(+), 16 deletions(-)

diff --git a/pymc/distributions/multivariate.py b/pymc/distributions/multivariate.py
index 395461f7504..347a6e2539a 100644
--- a/pymc/distributions/multivariate.py
+++ b/pymc/distributions/multivariate.py
@@ -72,7 +72,7 @@
 from pymc.distributions.transforms import Interval, ZeroSumTransform, _default_transform
 from pymc.logprob.abstract import _logprob
 from pymc.math import kron_diag, kron_dot
-from pymc.pytensorf import intX, normalize_rng_param
+from pymc.pytensorf import normalize_rng_param
 from pymc.util import check_dist_not_registered
 
 __all__ = [
@@ -929,7 +929,7 @@ class Wishart(Continuous):
 
     @classmethod
     def dist(cls, nu, V, *args, **kwargs):
-        nu = pt.as_tensor_variable(intX(nu))
+        nu = pt.as_tensor_variable(nu, dtype=int)
         V = pt.as_tensor_variable(V)
 
         warnings.warn(
@@ -2454,7 +2454,7 @@ class StickBreakingWeightsRV(RandomVariable):
 
     def make_node(self, rng, size, dtype, alpha, K):
         alpha = pt.as_tensor_variable(alpha)
-        K = pt.as_tensor_variable(intX(K))
+        K = pt.as_tensor_variable(K, dtype=int)
 
         if K.ndim > 0:
             raise ValueError("K must be a scalar.")
diff --git a/pymc/distributions/timeseries.py b/pymc/distributions/timeseries.py
index 714508e272a..d48b734ae22 100644
--- a/pymc/distributions/timeseries.py
+++ b/pymc/distributions/timeseries.py
@@ -45,7 +45,7 @@
 from pymc.exceptions import NotConstantValueError
 from pymc.logprob.abstract import _logprob
 from pymc.logprob.basic import logp
-from pymc.pytensorf import constant_fold, intX
+from pymc.pytensorf import constant_fold
 from pymc.util import check_dist_not_registered
 
 __all__ = [
@@ -174,7 +174,7 @@ def dist(cls, init_dist, innovation_dist, steps=None, **kwargs) -> pt.TensorVari
             )
         if steps is None:
             raise ValueError("Must specify steps or shape parameter")
-        steps = pt.as_tensor_variable(intX(steps))
+        steps = pt.as_tensor_variable(steps, dtype=int)
 
         return super().dist([init_dist, innovation_dist, steps], **kwargs)
 
@@ -599,7 +599,7 @@ def dist(
             )
         if steps is None:
             raise ValueError("Must specify steps or shape parameter")
-        steps = pt.as_tensor_variable(intX(steps), ndim=0)
+        steps = pt.as_tensor_variable(steps, dtype=int, ndim=0)
 
         if init_dist is not None:
             if not isinstance(init_dist, TensorVariable) or not isinstance(
@@ -961,7 +961,7 @@ def dist(cls, dt, sde_fn, sde_pars, *, init_dist=None, steps=None, **kwargs):
             )
         if steps is None:
             raise ValueError("Must specify steps or shape parameter")
-        steps = pt.as_tensor_variable(intX(steps), ndim=0)
+        steps = pt.as_tensor_variable(steps, dtype=int, ndim=0)
 
         dt = pt.as_tensor_variable(dt)
         sde_pars = [pt.as_tensor_variable(x) for x in sde_pars]
diff --git a/pymc/testing.py b/pymc/testing.py
index 42af747f979..ddc9683db0a 100644
--- a/pymc/testing.py
+++ b/pymc/testing.py
@@ -44,7 +44,7 @@
     local_check_parameter_to_ninf_switch,
     rvs_in_graph,
 )
-from pymc.pytensorf import compile_pymc, floatX, inputvars, intX
+from pymc.pytensorf import compile_pymc, floatX, inputvars
 
 # This mode can be used for tests where model compilations takes the bulk of the runtime
 # AND where we don't care about posterior numerical or sampling stability (e.g., when
@@ -771,7 +771,7 @@ def discrete_random_tester(
     f = fails
     while p <= alpha and f > 0:
         o = pymc_rand()
-        e = intX(ref_rand(size=size, **point))
+        e = ref_rand(size=size, **point).astype(int)
         o = np.atleast_1d(o).flatten()
         e = np.atleast_1d(e).flatten()
         bins = min(20, max(len(set(e)), len(set(o))))
diff --git a/tests/distributions/test_multivariate.py b/tests/distributions/test_multivariate.py
index 0541d8f4976..6ae0169cd47 100644
--- a/tests/distributions/test_multivariate.py
+++ b/tests/distributions/test_multivariate.py
@@ -43,7 +43,7 @@
 from pymc.logprob.basic import logp
 from pymc.logprob.utils import ParameterValueError
 from pymc.math import kronecker
-from pymc.pytensorf import compile_pymc, floatX, intX
+from pymc.pytensorf import compile_pymc, floatX
 from pymc.sampling.forward import draw
 from pymc.testing import (
     BaseTestDistributionRandom,
@@ -674,8 +674,8 @@ def test_multinomial_p_not_normalized_symbolic(self):
     )
     @pytest.mark.parametrize("extra_size", [(1,), (2,), (2, 3)])
     def test_multinomial_vectorized(self, n, p, extra_size):
-        n = intX(np.array(n))
-        p = floatX(np.array(p))
+        n = np.array(n)
+        p = np.array(p)
         p /= p.sum(axis=-1, keepdims=True)
 
         _, bcast_p = broadcast_params([n, p], ndims_params=[0, 1])
@@ -757,8 +757,8 @@ def test_dirichlet_multinomial_matches_beta_binomial(self):
     )
     @pytest.mark.parametrize("extra_size", [(1,), (2,), (2, 3)])
     def test_dirichlet_multinomial_vectorized(self, n, a, extra_size):
-        n = intX(np.array(n))
-        a = floatX(np.array(a))
+        n = np.array(n)
+        a = np.array(a)
 
         _, bcast_a = broadcast_params([n, a], ndims_params=[0, 1])
         size = extra_size + bcast_a.shape[:-1]
diff --git a/tests/variational/test_inference.py b/tests/variational/test_inference.py
index 99613511a74..b48b5c2c252 100644
--- a/tests/variational/test_inference.py
+++ b/tests/variational/test_inference.py
@@ -27,7 +27,6 @@
 import pymc as pm
 import pymc.variational.opvi as opvi
 
-from pymc.pytensorf import intX
 from pymc.variational.inference import ADVI, ASVGD, SVGD, FullRankADVI
 from pymc.variational.opvi import NotImplementedInference
 from tests import models
@@ -278,7 +277,7 @@ def test_profile(inference):
 @pytest.fixture(scope="module")
 def binomial_model():
     n_samples = 100
-    xs = intX(np.random.binomial(n=1, p=0.2, size=n_samples))
+    xs = np.random.binomial(n=1, p=0.2, size=n_samples)
     with pm.Model() as model:
         p = pm.Beta("p", alpha=1, beta=1)
pm.Binomial("xs", n=1, p=p, observed=xs) From 33b0e2ccf7fb6bd9381b64dedf9c4268ea9eb493 Mon Sep 17 00:00:00 2001 From: Ricardo Vieira Date: Wed, 22 May 2024 18:20:47 +0200 Subject: [PATCH 3/3] Get rid of floatX_array --- pymc/pytensorf.py | 4 ---- tests/models.py | 30 +++++++++++++++--------------- 2 files changed, 15 insertions(+), 19 deletions(-) diff --git a/pymc/pytensorf.py b/pymc/pytensorf.py index 6d44603a8c1..a4b7c7ec602 100644 --- a/pymc/pytensorf.py +++ b/pymc/pytensorf.py @@ -715,10 +715,6 @@ def generator(gen, default=None): return GeneratorOp(gen, default)() -def floatX_array(x): - return floatX(np.array(x)) - - def ix_(*args): """ PyTensor np.ix_ analog diff --git a/tests/models.py b/tests/models.py index 58f023af8f9..28a15adf06d 100644 --- a/tests/models.py +++ b/tests/models.py @@ -18,19 +18,19 @@ import pytensor import pytensor.tensor as pt +from pytensor import config from pytensor.compile.ops import as_op import pymc as pm from pymc import Categorical, Metropolis, Model, Normal -from pymc.pytensorf import floatX_array def simple_model(): mu = -2.1 tau = 1.3 with Model() as model: - Normal("x", mu, tau=tau, size=2, initval=floatX_array([0.1, 0.1])) + Normal("x", mu, tau=tau, size=2, initval=np.array([0.1, 0.1]).astype(config.floatX)) return model.initial_point(), model, (mu, tau**-0.5) @@ -43,8 +43,8 @@ def another_simple_model(): def simple_categorical(): - p = floatX_array([0.1, 0.2, 0.3, 0.4]) - v = floatX_array([0.0, 1.0, 2.0, 3.0]) + p = np.array([0.1, 0.2, 0.3, 0.4]) + v = np.array([0.0, 1.0, 2.0, 3.0]) with Model() as model: Categorical("x", p, size=3, initval=[1, 2, 3]) @@ -72,7 +72,7 @@ def arbitrary_det(value): with Model() as model: a = Normal("a") b = arbitrary_det(a) - Normal("obs", mu=b.astype("float64"), observed=floatX_array([1, 3, 5])) + Normal("obs", mu=b.astype("float64"), observed=np.array([1, 3, 5], dtype="float64")) return model.initial_point(), model @@ -94,15 +94,15 @@ def simple_2model_continuous(): def mv_simple(): - mu = floatX_array([-0.1, 0.5, 1.1]) - p = floatX_array([[2.0, 0, 0], [0.05, 0.1, 0], [1.0, -0.05, 5.5]]) + mu = np.array([-0.1, 0.5, 1.1]) + p = np.array([[2.0, 0, 0], [0.05, 0.1, 0], [1.0, -0.05, 5.5]]) tau = np.dot(p, p.T) with pm.Model() as model: pm.MvNormal( "x", pt.constant(mu), tau=pt.constant(tau), - initval=floatX_array([0.1, 1.0, 0.8]), + initval=np.array([0.1, 1.0, 0.8]), ) H = tau C = np.linalg.inv(H) @@ -110,15 +110,15 @@ def mv_simple(): def mv_simple_coarse(): - mu = floatX_array([-0.2, 0.6, 1.2]) - p = floatX_array([[2.0, 0, 0], [0.05, 0.1, 0], [1.0, -0.05, 5.5]]) + mu = np.array([-0.2, 0.6, 1.2]) + p = np.array([[2.0, 0, 0], [0.05, 0.1, 0], [1.0, -0.05, 5.5]]) tau = np.dot(p, p.T) with pm.Model() as model: pm.MvNormal( "x", pt.constant(mu), tau=pt.constant(tau), - initval=floatX_array([0.1, 1.0, 0.8]), + initval=np.array([0.1, 1.0, 0.8]), ) H = tau C = np.linalg.inv(H) @@ -126,15 +126,15 @@ def mv_simple_coarse(): def mv_simple_very_coarse(): - mu = floatX_array([-0.3, 0.7, 1.3]) - p = floatX_array([[2.0, 0, 0], [0.05, 0.1, 0], [1.0, -0.05, 5.5]]) + mu = np.array([-0.3, 0.7, 1.3]) + p = np.array([[2.0, 0, 0], [0.05, 0.1, 0], [1.0, -0.05, 5.5]]) tau = np.dot(p, p.T) with pm.Model() as model: pm.MvNormal( "x", pt.constant(mu), tau=pt.constant(tau), - initval=floatX_array([0.1, 1.0, 0.8]), + initval=np.array([0.1, 1.0, 0.8]), ) H = tau C = np.linalg.inv(H) @@ -144,7 +144,7 @@ def mv_simple_very_coarse(): def mv_simple_discrete(): d = 2 n = 5 - p = floatX_array([0.15, 0.85]) + p = np.array([0.15, 0.85]) with pm.Model() as 
         pm.Multinomial("x", n, pt.constant(p), initval=np.array([1, 4]))
         mu = n * p
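
A minimal sketch of the dtype-handling pattern these patches converge on, assuming only that numpy and pytensor are installed; the values and variable names below are illustrative and not taken from the PyMC code base:

    import numpy as np
    import pytensor.tensor as pt
    from pytensor import config

    # Integer inputs: instead of wrapping with intX(...), pass dtype=int to
    # as_tensor_variable, as the timeseries/multivariate hunks above now do.
    steps = pt.as_tensor_variable(10, dtype=int, ndim=0)
    print(steps.dtype)  # an integer dtype such as "int64"

    # Float arrays: instead of floatX_array([...]), build a NumPy array and cast
    # it to the configured float precision, as tests/models.py now does.
    initval = np.array([0.1, 0.1]).astype(config.floatX)
    print(initval.dtype)  # matches config.floatX ("float64" or "float32")

Both spellings avoid the pymc.pytensorf dtype helpers that these patches remove.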