diff --git a/.gitignore b/.gitignore
index aabab04a..1c02e5e1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,5 @@
 *.jl.*.cov
 *.jl.cov
 *.jl.mem
-/Manifest.toml
+Manifest.toml
 /docs/build/
-/docs/Manifest.toml
diff --git a/Project.toml b/Project.toml
index 2caf0bb3..c18c60dd 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,16 +1,16 @@
 name = "Pathfinder"
 uuid = "b1d3bc72-d0e7-4279-b92f-7fa5d6d2d454"
 authors = ["Seth Axen and contributors"]
-version = "0.7.0-DEV"
+version = "0.7.0"
 
 [deps]
-AbstractDifferentiation = "c29ec348-61ec-40c8-8164-b8c60e9d9f3d"
 Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
 Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
 Folds = "41a02a25-b8f0-4f67-bc48-60067656b558"
 ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
 IrrationalConstants = "92d709cd-6900-40b7-9082-c6be49f344b6"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
+LogDensityProblems = "6fdf6af0-433a-55f7-b3ed-c6c6e0b8df7c"
 Optim = "429524aa-4258-5aef-a3af-852621145aeb"
 Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba"
 OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e"
@@ -26,12 +26,12 @@ Transducers = "28d57a85-8fef-5791-bfe6-a80928e7c999"
 UnPack = "3a884ed6-31ef-47d7-9d2a-63182c4928ed"
 
 [compat]
-AbstractDifferentiation = "0.4"
 Accessors = "0.1"
 Distributions = "0.25"
 Folds = "0.2"
 ForwardDiff = "0.10"
 IrrationalConstants = "0.1.1"
+LogDensityProblems = "2"
 Optim = "1.4"
 Optimization = "3"
 OptimizationOptimJL = "0.1"
@@ -47,8 +47,7 @@ julia = "1.6"
 
 [extras]
 OptimizationNLopt = "4e6fcdb7-1186-4e1f-a706-475e75c168bb"
-ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 
 [targets]
-test = ["OptimizationNLopt", "ReverseDiff", "Test"]
+test = ["OptimizationNLopt", "Test"]
diff --git a/docs/src/examples/initializing-hmc.md b/docs/src/examples/initializing-hmc.md
index 6b23a008..b939bbcc 100644
--- a/docs/src/examples/initializing-hmc.md
+++ b/docs/src/examples/initializing-hmc.md
@@ -85,7 +85,7 @@ nothing # hide
 To use DynamicHMC, we first need to transform our model to an unconstrained space using [TransformVariables.jl](https://tamaspapp.eu/TransformVariables.jl/stable/) and wrap it in a type that implements the [LogDensityProblems.jl](https://github.com/tpapp/LogDensityProblems.jl) interface:
 
 ```@example 1
-using DynamicHMC, LogDensityProblems, LogDensityProblemsAD, TransformVariables
+using DynamicHMC, ForwardDiff, LogDensityProblems, LogDensityProblemsAD, TransformVariables
 using TransformedLogDensities: TransformedLogDensity
 
 transform = as((σ=asℝ₊, α=asℝ, β=as(Array, J)))
@@ -93,12 +93,10 @@ P = TransformedLogDensity(transform, model)
 ∇P = ADgradient(:ForwardDiff, P)
 ```
 
-Pathfinder, on the other hand, expects a log-density function:
+Pathfinder can take any object that implements this interface.
 
 ```@example 1
-logp(x) = LogDensityProblems.logdensity(P, x)
-∇logp(x) = LogDensityProblems.logdensity_and_gradient(∇P, x)[2]
-result_pf = pathfinder(logp, ∇logp; dim)
+result_pf = pathfinder(∇P)
 ```
 
 ```@example 1
@@ -158,10 +156,10 @@ result_dhmc3 = mcmc_with_warmup(
 
 ## AdvancedHMC.jl
 
-Similar to DynamicHMC, AdvancedHMC can work with an object implementing the LogDensityProblems API:
+Similar to Pathfinder and DynamicHMC, AdvancedHMC can also work with a LogDensityProblem:
 
 ```@example 1
-using AdvancedHMC, ForwardDiff
+using AdvancedHMC
 
 nadapts = 500;
 nothing # hide
diff --git a/docs/src/examples/quickstart.md b/docs/src/examples/quickstart.md
index 0027c117..2ac4fced 100644
--- a/docs/src/examples/quickstart.md
+++ b/docs/src/examples/quickstart.md
@@ -6,11 +6,28 @@ This page introduces basic Pathfinder usage with examples.
 
 For a simple example, we'll run Pathfinder on a multivariate normal distribution with a dense covariance matrix.
+Pathfinder expects an object that implements the [LogDensityProblems](https://www.tamaspapp.eu/LogDensityProblems.jl) interface and has a gradient implemented.
+We can use automatic differentiation to compute the gradient using [LogDensityProblemsAD](https://github.com/tpapp/LogDensityProblemsAD.jl).
 
 ```@example 1
-using LinearAlgebra, Pathfinder, Printf, StatsPlots, Random
+using ForwardDiff, LinearAlgebra, LogDensityProblems, LogDensityProblemsAD,
+    Pathfinder, Printf, StatsPlots, Random
 Random.seed!(42)
+
+struct MvNormalProblem{T,S}
+    μ::T  # mean
+    P::S  # precision matrix
+end
+function LogDensityProblems.capabilities(::Type{<:MvNormalProblem})
+    return LogDensityProblems.LogDensityOrder{0}()
+end
+LogDensityProblems.dimension(ℓ::MvNormalProblem) = length(ℓ.μ)
+function LogDensityProblems.logdensity(ℓ::MvNormalProblem, x)
+    μ, P = ℓ.μ, ℓ.P
+    z = x - μ
+    return -dot(z, P, z) / 2
+end
+
 Σ = [
     2.71 0.5 0.19 0.07 1.04
     0.5 1.11 -0.08 -0.17 -0.08
@@ -20,20 +37,15 @@ Random.seed!(42)
 ]
 μ = [-0.55, 0.49, -0.76, 0.25, 0.94]
 P = inv(Symmetric(Σ))
-dim = length(μ)
-init_scale=4
+prob_mvnormal = ADgradient(:ForwardDiff, MvNormalProblem(μ, P))
 
-function logp_mvnormal(x)
-    z = x - μ
-    return -dot(z, P, z) / 2
-end
 nothing # hide
 ```
 
 Now we run [`pathfinder`](@ref).
 
 ```@example 1
-result = pathfinder(logp_mvnormal; dim, init_scale)
+result = pathfinder(prob_mvnormal; init_scale=4)
 ```
 
 `result` is a [`PathfinderResult`](@ref).
@@ -114,14 +126,21 @@ Now we will run Pathfinder on the following banana-shaped distribution with density
 \pi(x_1, x_2) = e^{-x_1^2 / 2} e^{-5 (x_2 - x_1^2)^2 / 2}.
 ```
 
-First we define the distribution:
+First we define the log density problem:
 
 ```@example 1
 Random.seed!(23)
 
-logp_banana(x) = -(x[1]^2 + 5(x[2] - x[1]^2)^2) / 2
-dim = 2
-init_scale = 10
+struct BananaProblem end
+function LogDensityProblems.capabilities(::Type{<:BananaProblem})
+    return LogDensityProblems.LogDensityOrder{0}()
+end
+LogDensityProblems.dimension(ℓ::BananaProblem) = 2
+function LogDensityProblems.logdensity(ℓ::BananaProblem, x)
+    return -(x[1]^2 + 5(x[2] - x[1]^2)^2) / 2
+end
+
+prob_banana = ADgradient(:ForwardDiff, BananaProblem())
 nothing # hide
 ```
 
 and then visualise it:
 
 ```@example 1
 xrange = -3.5:0.05:3.5
 yrange = -3:0.05:7
+logp_banana(x) = LogDensityProblems.logdensity(prob_banana, x)
 contour(xrange, yrange, exp ∘ logp_banana ∘ Base.vect; xlabel="x₁", ylabel="x₂")
 ```
 
 Now we run [`pathfinder`](@ref).
 
 ```@example 1
-result = pathfinder(logp_banana; dim, init_scale)
+result = pathfinder(prob_banana; init_scale=10)
 ```
 
 As before we can visualise each iteration of the algorithm.
@@ -155,7 +175,7 @@ Especially for such complicated target distributions, it's always a good idea to
 
 ```@example 1
 ndraws = 1_000
-result = multipathfinder(logp_banana, ndraws; nruns=20, dim, init_scale)
+result = multipathfinder(prob_banana, ndraws; nruns=20, init_scale=10)
 ```
 
 `result` is a [`MultiPathfinderResult`](@ref).
@@ -163,7 +183,7 @@ See its docstring for a description of its fields.
 
 `result.fit_distribution` is a uniformly-weighted `Distributions.MixtureModel`.
 Each component is the result of a single Pathfinder run.
-It's possible that some runs fit the target distribution much better than others, so instead of just drawing samples from `result.fit_distribution`, `multipathfinder` draws many samples from each component and then uses Pareto-smoothed importance resampling (using [PSIS.jl](https://psis.julia.arviz.org/stable/)) from these draws to better target `logp_banana`.
+It's possible that some runs fit the target distribution much better than others, so instead of just drawing samples from `result.fit_distribution`, `multipathfinder` draws many samples from each component and then uses Pareto-smoothed importance resampling (using [PSIS.jl](https://psis.julia.arviz.org/stable/)) from these draws to better target `prob_banana`.
 
 The Pareto shape diagnostic informs us on the quality of these draws.
 Here the Pareto shape ``k`` diagnostic is bad (``k > 0.7``), which tells us that these draws are unsuitable for computing posterior estimates, so we should definitely run MCMC to get better draws.
@@ -207,15 +227,23 @@ function logp_funnel(x)
     return ((τ / 3)^2 + (n - 1) * τ + sum(b -> abs2(b * exp(-τ / 2)), β)) / -2
 end
 
-dim = 100
-init_scale = 10
+struct FunnelProblem
+    dim::Int
+end
+function LogDensityProblems.capabilities(::Type{<:FunnelProblem})
+    return LogDensityProblems.LogDensityOrder{0}()
+end
+LogDensityProblems.dimension(ℓ::FunnelProblem) = ℓ.dim
+LogDensityProblems.logdensity(::FunnelProblem, x) = logp_funnel(x)
+
+prob_funnel = ADgradient(:ForwardDiff, FunnelProblem(100))
 nothing # hide
 ```
 
 First, let's fit this posterior with single-path Pathfinder.
 
 ```@example 1
-result_single = pathfinder(logp_funnel; dim, init_scale)
+result_single = pathfinder(prob_funnel; init_scale=10)
 ```
 
 Let's visualize this sequence of multivariate normals for the first two dimensions.
@@ -240,7 +268,7 @@ Now we run [`multipathfinder`](@ref).
 
 ```@example 1
 ndraws = 1_000
-result = multipathfinder(logp_funnel, ndraws; nruns=20, dim, init_scale)
+result = multipathfinder(prob_funnel, ndraws; nruns=20, init_scale=10)
 ```
 
 Again, the poor Pareto shape diagnostic indicates we should run MCMC to get draws suitable for computing posterior estimates.
diff --git a/docs/src/index.md b/docs/src/index.md
index 8eff5a93..4cc49757 100644
--- a/docs/src/index.md
+++ b/docs/src/index.md
@@ -42,7 +42,7 @@ Pathfinder uses several packages for extended functionality:
 - [Optimization.jl](https://optimization.sciml.ai/stable/): This allows the L-BFGS optimizer to be replaced with any of the many Optimization-compatible optimizers and supports use of callbacks. Note that any changes made to Pathfinder using these features would be experimental.
 - [Transducers.jl](https://juliafolds.github.io/Transducers.jl/stable/): parallelization support
 - [Distributions.jl](https://juliastats.org/Distributions.jl/stable/)/[PDMats.jl](https://github.com/JuliaStats/PDMats.jl): fits can be used anywhere a `Distribution` can be used
-- [AbstractDifferentiation.jl](https://github.com/JuliaDiff/AbstractDifferentiation.jl): selecting the AD package used to differentiate the provided log-density function.
+- [LogDensityProblems.jl](https://www.tamaspapp.eu/LogDensityProblems.jl/stable/): defining the log-density function, gradient, and Hessian
 - [ProgressLogging.jl](https://julialogging.github.io/ProgressLogging.jl/stable/): In Pluto, Juno, and VSCode, nested progress bars are shown. In the REPL, use TerminalLoggers.jl to get progress bars.
 
 [^Zhang2021]: Lu Zhang, Bob Carpenter, Andrew Gelman, Aki Vehtari (2021).
diff --git a/src/Pathfinder.jl b/src/Pathfinder.jl
index a261c5c8..2fec765a 100644
--- a/src/Pathfinder.jl
+++ b/src/Pathfinder.jl
@@ -1,12 +1,12 @@
 module Pathfinder
 
-using AbstractDifferentiation: AD
 using Distributions: Distributions
 using Folds: Folds
 # ensure that ForwardDiff is conditionally loaded by Optimization
 using ForwardDiff: ForwardDiff
 using IrrationalConstants: log2π
 using LinearAlgebra
+using LogDensityProblems: LogDensityProblems
 using Optim: Optim, LineSearches
 using Optimization: Optimization
 using OptimizationOptimJL: OptimizationOptimJL
diff --git a/src/multipath.jl b/src/multipath.jl
index 6aeb28cc..150b6905 100644
--- a/src/multipath.jl
+++ b/src/multipath.jl
@@ -67,8 +67,7 @@ function Base.show(io::IO, ::MIME"text/plain", result::MultiPathfinderResult)
 end
 
 """
-    multipathfinder(logp, ndraws; kwargs...)
-    multipathfinder(logp, ∇logp, ndraws; kwargs...)
+    multipathfinder(ℓ, ndraws; kwargs...)
     multipathfinder(fun::SciMLBase.OptimizationFunction, ndraws; kwargs...)
 
 Run [`pathfinder`](@ref) multiple times to fit a multivariate normal mixture model.
@@ -91,31 +90,29 @@ resulting draws better approximate draws from the target distribution ``p`` inst
 for approximating expectations with respect to ``p``.
 
 # Arguments
-- `logp`: a callable that computes the log-density of the target distribution.
-- `∇logp`: a callable that computes the gradient of `logp`. If not provided, `logp` is
-  automatically differentiated using the backend specified in `ad_backend`.
+- `ℓ`: an object, representing the log-density of the target distribution and its gradient,
+  that implements the [LogDensityProblems](https://www.tamaspapp.eu/LogDensityProblems.jl)
+  interface.
 - `fun::SciMLBase.OptimizationFunction`: an optimization function that represents
-  `-logp(x)` with its gradient. It must have the necessary features (e.g. a Hessian
-  function) for the chosen optimization algorithm. For details, see
+  a negative log density with its gradient. It must have the necessary features (e.g. a
+  Hessian function) for the chosen optimization algorithm. For details, see
   [Optimization.jl: OptimizationFunction](https://optimization.sciml.ai/stable/API/optimization_function/).
 - `ndraws::Int`: number of approximate draws to return
 
 # Keywords
 - `init`: iterator of length `nruns` of initial points of length `dim` from which each
   single-path Pathfinder run will begin. `length(init)` must be implemented. If `init` is
-  not provided, `dim` and `nruns` must be.
+  not provided, `nruns` must be, and `dim` must be if `fun` is provided.
 - `nruns::Int`: number of runs of Pathfinder to perform. Ignored if `init` is provided.
-- `ad_backend=AD.ForwardDiffBackend()`: AbstractDifferentiation.jl AD backend used to
-  differentiate `logp` if `∇logp` is not provided.
 - `ndraws_per_run::Int`: The number of draws to take for each component before resampling.
   Defaults to a number such that `ndraws_per_run * nruns > ndraws`.
 - `importance::Bool=true`: Perform Pareto smoothed importance resampling of draws.
 - `rng::AbstractRNG=Random.GLOBAL_RNG`: Pseudorandom number generator. It is recommended to
   use a parallelization-friendly PRNG like the default PRNG on Julia 1.7 and up.
-- `executor::Transducers.Executor`: Transducers.jl executor that determines if and how
-  to run the single-path runs in parallel. If `rng` is known to be thread-safe, the
-  default is `Transducers.PreferParallel()` (parallel executation, defaulting to
-  multi-threading). Otherwise, it is `Transducers.SequentialEx()` (no parallelization).
+- `executor::Transducers.Executor=Transducers.SequentialEx()`: Transducers.jl executor that
+  determines if and how to run the single-path runs in parallel. If a transducer for
+  multi-threaded computation is selected, you must first verify that `rng` and the log
+  density function are thread-safe.
 - `executor_per_run::Transducers.Executor=Transducers.SequentialEx()`: Transducers.jl
   executor used within each run to parallelize PRNG calls. Defaults to no parallelization.
   See [`pathfinder`](@ref) for a description.
@@ -126,22 +123,11 @@ for approximating expectations with respect to ``p``.
 """
 function multipathfinder end
 
-function multipathfinder(
-    logp, ndraws::Int; ad_backend=AD.ForwardDiffBackend(), input=logp, kwargs...
-)
-    return multipathfinder(build_optim_function(logp; ad_backend), ndraws; input, kwargs...)
-end
-function multipathfinder(
-    logp,
-    ∇logp,
-    ndraws::Int;
-    ad_backend=AD.ForwardDiffBackend(),
-    input=(logp, ∇logp),
-    kwargs...,
-)
-    return multipathfinder(
-        build_optim_function(logp, ∇logp; ad_backend), ndraws; input, kwargs...
-    )
+function multipathfinder(ℓ, ndraws::Int; input=ℓ, kwargs...)
+    _check_log_density_problem(ℓ)
+    dim = LogDensityProblems.dimension(ℓ)
+    optim_fun = build_optim_function(ℓ)
+    return multipathfinder(optim_fun, ndraws; input, dim, kwargs...)
 end
 function multipathfinder(
     optim_fun::SciMLBase.OptimizationFunction,
@@ -154,7 +140,7 @@ function multipathfinder(
     rng::Random.AbstractRNG=Random.GLOBAL_RNG,
     history_length::Int=DEFAULT_HISTORY_LENGTH,
     optimizer=default_optimizer(history_length),
-    executor::Transducers.Executor=_default_executor(rng),
+    executor::Transducers.Executor=Transducers.SequentialEx(),
     executor_per_run=Transducers.SequentialEx(),
     importance::Bool=true,
     kwargs...,
diff --git a/src/optimize.jl b/src/optimize.jl
index f5055d32..db1c4f11 100644
--- a/src/optimize.jl
+++ b/src/optimize.jl
@@ -1,28 +1,16 @@
-function build_optim_function(f; ad_backend=AD.ForwardDiffBackend())
-    ∇f(x) = only(AD.gradient(ad_backend, f, x))
-    return build_optim_function(f, ∇f; ad_backend)
-end
-function build_optim_function(f, ∇f; ad_backend=AD.ForwardDiffBackend())
-    # because we need explicit access to grad, we generate these ourselves instead of using
-    # Optimization.jl's auto-AD feature.
-    # TODO: switch to caching API if available, see
-    # https://github.com/JuliaDiff/AbstractDifferentiation.jl/issues/41
+function build_optim_function(ℓ)
+    f(x, p) = -LogDensityProblems.logdensity(ℓ, x)
     function grad(res, x, p)
-        ∇fx = ∇f(x)
+        _, ∇fx = LogDensityProblems.logdensity_and_gradient(ℓ, x)
         @. res = -∇fx
         return res
     end
     function hess(res, x, p)
-        H = only(AD.hessian(ad_backend, f, x))
+        _, _, H = LogDensityProblems.logdensity_gradient_and_hessian(ℓ, x)
         @. res = -H
         return res
     end
-    function hv(res, x, v, p)
-        Hv = only(AD.lazy_hessian(ad_backend, f, x) * v)
-        @. res = -Hv
-        return res
-    end
-    return SciMLBase.OptimizationFunction{true}((x, p) -> -f(x); grad, hess, hv)
+    return SciMLBase.OptimizationFunction{true}(f; grad, hess)
 end
 
 function build_optim_problem(optim_fun, x₀)
diff --git a/src/singlepath.jl b/src/singlepath.jl
index 0add6dbb..4603a2c3 100644
--- a/src/singlepath.jl
+++ b/src/singlepath.jl
@@ -5,8 +5,8 @@ Container for results of single-path Pathfinder.
 
 # Fields
-- `input`: User-provided input object, e.g. either `logp`, `(logp, ∇logp)`, `optim_fun`,
-  `optim_prob`, or another object.
+- `input`: User-provided input object, e.g. a LogDensityProblem, `optim_fun`, `optim_prob`,
+  or another object.
 - `optimizer`: Optimizer used for maximizing the log-density
 - `rng`: Pseudorandom number generator that was used for sampling
 - `optim_prob::SciMLBase.OptimizationProblem`: Optimization problem used for
@@ -68,36 +68,35 @@ function Base.show(io::IO, ::MIME"text/plain", result::PathfinderResult)
 end
 
 """
-    pathfinder(logp; kwargs...)
-    pathfinder(logp, ∇logp; kwargs...)
+    pathfinder(ℓ; kwargs...)
     pathfinder(fun::SciMLBase.OptimizationFunction; kwargs...)
     pathfinder(prob::SciMLBase.OptimizationProblem; kwargs...)
 
-Find the best multivariate normal approximation encountered while maximizing `logp`.
+Find the best multivariate normal approximation encountered while maximizing a log density.
 
 From an optimization trajectory, Pathfinder constructs a sequence of (multivariate normal)
-approximations to the distribution specified by `logp`. The approximation that maximizes the
-evidence lower bound (ELBO), or equivalently, minimizes the KL divergence between the
-approximation and the true distribution, is returned.
+approximations to the distribution specified by a log density function. The approximation
+that maximizes the evidence lower bound (ELBO), or equivalently, minimizes the KL divergence
+between the approximation and the true distribution, is returned.
 
 The covariance of the multivariate normal distribution is an inverse Hessian approximation
 constructed using at most the previous `history_length` steps.
 
 # Arguments
-- `logp`: a callable that computes the log-density of the target distribution.
-- `∇logp`: a callable that computes the gradient of `logp`. If not provided, `logp` is
-  automatically differentiated using the backend specified in `ad_backend`.
-- `fun::SciMLBase.OptimizationFunction`: an optimization function that represents
-  `-logp(x)` with its gradient. It must have the necessary features (e.g. a Hessian
-  function) for the chosen optimization algorithm. For details, see
+- `ℓ`: an object, representing the log-density of the target distribution and its gradient,
+  that implements the [LogDensityProblems](https://www.tamaspapp.eu/LogDensityProblems.jl)
+  interface.
+- `fun::SciMLBase.OptimizationFunction`: an optimization function that represents the
+  negative log density with its gradient. It must have the necessary features (e.g. a
+  Hessian function, if applicable) for the chosen optimization algorithm. For details, see
   [Optimization.jl: OptimizationFunction](https://optimization.sciml.ai/stable/API/optimization_function/).
 - `prob::SciMLBase.OptimizationProblem`: an optimization problem containing a function with
   the same properties as `fun`, as well as an initial point, in which case `init` and
   `dim` are ignored.
 
 # Keywords
-- `dim::Int`: dimension of the target distribution. If not provided, `init` or must be.
-  Ignored if `init` is provided.
+- `dim::Int`: dimension of the target distribution, needed only if `fun` is provided and
+  `init` is not.
 - `init::AbstractVector{<:Real}`: initial point of length `dim` from which to begin
   optimization. If not provided, an initial point of type `Vector{Float64}` and length
   `dim` is created and filled using `init_sampler`.
@@ -107,7 +106,6 @@ constructed using at most the previous `history_length` steps.
   length `dims` in-place to generate an initial point
 - `ndraws_elbo::Int=$DEFAULT_NDRAWS_ELBO`: Number of draws used to estimate the ELBO
 - `ndraws::Int=ndraws_elbo`: number of approximate draws to return
-- `ad_backend=AD.ForwardDiffBackend()`: AbstractDifferentiation.jl AD backend.
 - `rng::Random.AbstractRNG`: The random number generator to be used for drawing samples
 - `executor::Transducers.Executor=Transducers.SequentialEx()`: Transducers.jl executor that
   determines if and how to perform ELBO computation in parallel. The default
@@ -134,13 +132,11 @@ constructed using at most the previous `history_length` steps.
 """
 function pathfinder end
 
-function pathfinder(logp; ad_backend=AD.ForwardDiffBackend(), kwargs...)
-    return pathfinder(build_optim_function(logp; ad_backend); input=logp, kwargs...)
-end
-function pathfinder(logp, ∇logp; ad_backend=AD.ForwardDiffBackend(), kwargs...)
-    return pathfinder(
-        build_optim_function(logp, ∇logp; ad_backend); input=(logp, ∇logp), kwargs...
-    )
+function pathfinder(ℓ; input=ℓ, kwargs...)
+    _check_log_density_problem(ℓ)
+    dim = LogDensityProblems.dimension(ℓ)
+    optim_fun = build_optim_function(ℓ)
+    return pathfinder(optim_fun; input, dim, kwargs...)
 end
 function pathfinder(
     optim_fun::SciMLBase.OptimizationFunction;
@@ -337,3 +333,21 @@ function (s::UniformSampler)(rng::Random.AbstractRNG, point)
     @. point = rand(rng) * 2scale - scale
     return point
 end
+
+function _check_log_density_problem(ℓ)
+    capabilities = LogDensityProblems.capabilities(ℓ)
+    if capabilities === nothing
+        throw(
+            ArgumentError(
+                "Provided object must implement the LogDensityProblems interface. See https://www.tamaspapp.eu/LogDensityProblems.jl.",
+            ),
+        )
+    elseif capabilities === LogDensityProblems.LogDensityOrder{0}()
+        throw(
+            ArgumentError(
+                "The log density problem must at least support gradient computation. To use automatic differentiation, see https://github.com/tpapp/LogDensityProblemsAD.jl.",
+            ),
+        )
+    end
+    return nothing
+end
diff --git a/src/transducers.jl b/src/transducers.jl
index 7f28a97f..cea38ae0 100644
--- a/src/transducers.jl
+++ b/src/transducers.jl
@@ -1,19 +1,3 @@
-# utility functions to work with Transducers.jl
-
-_is_thread_safe_rng(rng) = false
-if VERSION ≥ v"1.7.0"
-    _is_thread_safe_rng(rng::Random.TaskLocalRNG) = true
-    _is_thread_safe_rng(rng::typeof(Random.GLOBAL_RNG)) = true
-end
-
-function _default_executor(rng; kwargs...)
-    if _is_thread_safe_rng(rng)
-        return Transducers.PreferParallel(; kwargs...)
-    else
-        return Transducers.SequentialEx()
-    end
-end
-
 # transducer-friendly findmax, ignoring NaNs
 function _findmax(x)
     return Transducers.foldxl(x |> Transducers.Enumerate(); init=missing) do xmax_imax, i_xi
diff --git a/test/integration/AdvancedHMC/Project.toml b/test/integration/AdvancedHMC/Project.toml
index 1d1095ff..ffb2362e 100644
@@ -4,6 +4,7 @@ Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
 ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 LogDensityProblems = "6fdf6af0-433a-55f7-b3ed-c6c6e0b8df7c"
+LogDensityProblemsAD = "996a588d-648d-4e1f-a8f0-a84b347e47b1"
 MCMCDiagnosticTools = "be115224-59cd-429b-ad48-344e309966f0"
 Optim = "429524aa-4258-5aef-a3af-852621145aeb"
 Pathfinder = "b1d3bc72-d0e7-4279-b92f-7fa5d6d2d454"
@@ -11,14 +12,19 @@ Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 StatsFuns = "4c63d2b9-4356-54db-8cca-17b64c39e42c"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+TransformVariables = "84d833dd-6860-57f9-a1a7-6da5db126cff"
+TransformedLogDensities = "f9bc47f6-f3f8-4f3b-ab21-f8bc73906f26"
 
 [compat]
 AdvancedHMC = "0.4"
 Distributions = "0.25"
 ForwardDiff = "0.10"
-LogDensityProblems = "2"
+LogDensityProblems = "1, 2"
+LogDensityProblemsAD = "1"
 MCMCDiagnosticTools = "0.1.3, 0.2"
 Optim = "1.4"
 Pathfinder = "0.7"
 StatsFuns = "0.9, 1"
+TransformVariables = "0.6, 0.7"
+TransformedLogDensities = "1"
 julia = "1.6"
diff --git a/test/integration/AdvancedHMC/runtests.jl b/test/integration/AdvancedHMC/runtests.jl
index d322d950..45621a16 100644
--- a/test/integration/AdvancedHMC/runtests.jl
+++ b/test/integration/AdvancedHMC/runtests.jl
@@ -2,28 +2,30 @@ using AdvancedHMC,
     ForwardDiff,
     LinearAlgebra,
     LogDensityProblems,
+    LogDensityProblemsAD,
     MCMCDiagnosticTools,
     Optim,
    Pathfinder,
     Random,
     Statistics,
     StatsFuns,
-    Test
+    Test,
+    TransformVariables
+using TransformedLogDensities: TransformedLogDensity
 
 Random.seed!(0)
 
-struct RegressionModel{X,Y}
+struct RegressionProblem{X,Y}
     x::X
     y::Y
 end
 
-function (m::RegressionModel)(θ)
-    logσ = θ[1]
-    σ = exp(logσ)
-    α = θ[2]
-    β = θ[3:end]
-    x = m.x
-    y = m.y
+function (prob::RegressionProblem)(θ)
+    σ = θ.σ
+    α = θ.α
+    β = θ.β
+    x = prob.x
+    y = prob.y
     lp = normlogpdf(σ) + logtwo
     lp += normlogpdf(α)
     lp += sum(normlogpdf, β)
@@ -34,12 +36,6 @@ function (m::RegressionModel)(θ)
     return lp
 end
 
-function LogDensityProblems.capabilities(::Type{<:RegressionModel})
-    return LogDensityProblems.LogDensityOrder{0}()
-end
-LogDensityProblems.dimension(m::RegressionModel) = size(m.x, 2) + 2
-LogDensityProblems.logdensity(m::RegressionModel, θ) = m(θ)
-
 function mean_and_mcse(f, θs)
     zs = map(f, θs)
     ms = mean(zs)
@@ -114,10 +110,13 @@ end
     y = sin.(x) .+ randn.() .* 0.2 .+ x
     X = [x x .^ 2 x .^ 3]
     θ₀ = randn(nparams)
-    ℓπ = RegressionModel(X, y)
+    prob = RegressionProblem(X, y)
+    trans = as((σ=asℝ₊, α=asℝ, β=as(Array, size(X, 2))))
+    P = TransformedLogDensity(trans, prob)
+    ∇P = ADgradient(:ForwardDiff, P)
 
     metric = DiagEuclideanMetric(nparams)
-    hamiltonian = Hamiltonian(metric, ℓπ, ForwardDiff)
+    hamiltonian = Hamiltonian(metric, ∇P)
     ϵ = find_good_stepsize(hamiltonian, θ₀)
     integrator = Leapfrog(ϵ)
     proposal = NUTS{MultinomialTS,GeneralisedNoUTurn}(integrator)
@@ -135,11 +134,11 @@ end
         progress=false,
     )
 
-    result_pf = pathfinder(ℓπ; dim=5, optimizer=Optim.LBFGS(; m=6))
+    result_pf = pathfinder(∇P; dim=5, optimizer=Optim.LBFGS(; m=6))
 
     @testset "Initial point" begin
         metric = DiagEuclideanMetric(nparams)
-        hamiltonian = Hamiltonian(metric, ℓπ, ForwardDiff)
+        hamiltonian = Hamiltonian(metric, ∇P)
         ϵ = find_good_stepsize(hamiltonian, θ₀)
         integrator = Leapfrog(ϵ)
         proposal = NUTS{MultinomialTS,GeneralisedNoUTurn}(integrator)
@@ -159,7 +158,7 @@ end
 
     @testset "Initial point and metric" begin
         metric = DiagEuclideanMetric(diag(result_pf.fit_distribution.Σ))
-        hamiltonian = Hamiltonian(metric, ℓπ, ForwardDiff)
+        hamiltonian = Hamiltonian(metric, ∇P)
         ϵ = find_good_stepsize(hamiltonian, θ₀)
         integrator = Leapfrog(ϵ)
         proposal = NUTS{MultinomialTS,GeneralisedNoUTurn}(integrator)
@@ -179,7 +178,7 @@ end
 
     @testset "Initial point and final metric" begin
         metric = Pathfinder.RankUpdateEuclideanMetric(result_pf.fit_distribution.Σ)
-        hamiltonian = Hamiltonian(metric, ℓπ, ForwardDiff)
+        hamiltonian = Hamiltonian(metric, ∇P)
         ϵ = find_good_stepsize(hamiltonian, θ₀)
         integrator = Leapfrog(ϵ)
         proposal = NUTS{MultinomialTS,GeneralisedNoUTurn}(integrator)
diff --git a/test/integration/DynamicHMC/Project.toml b/test/integration/DynamicHMC/Project.toml
index eb07c864..0eeefe9d 100644
--- a/test/integration/DynamicHMC/Project.toml
+++ b/test/integration/DynamicHMC/Project.toml
@@ -19,7 +19,7 @@ LogDensityProblems = "1, 2"
 LogDensityProblemsAD = "1"
 MCMCDiagnosticTools = "0.1.3, 0.2"
 Optim = "1.4"
-Pathfinder = "0.5, 0.6, 0.7"
+Pathfinder = "0.7"
 StatsFuns = "0.9, 1"
 TransformVariables = "0.6, 0.7"
 TransformedLogDensities = "1"
diff --git a/test/integration/DynamicHMC/runtests.jl b/test/integration/DynamicHMC/runtests.jl
index a473fe14..b1960d66 100644
--- a/test/integration/DynamicHMC/runtests.jl
+++ b/test/integration/DynamicHMC/runtests.jl
@@ -89,9 +89,7 @@ end
 
     result_hmc1 = mcmc_with_warmup(rng, ∇P, ndraws; reporter=NoProgressReport())
 
-    logp(x) = LogDensityProblems.logdensity(P, x)
-    ∇logp(x) = LogDensityProblems.logdensity_and_gradient(∇P, x)[2]
-    result_pf = pathfinder(logp, ∇logp; dim=LogDensityProblems.dimension(P))
+    result_pf = pathfinder(∇P)
 
     @testset "Initial point" begin
         result_hmc2 = mcmc_with_warmup(
diff --git a/test/inverse_hessian.jl b/test/inverse_hessian.jl
index 6cc696ea..d8cc3453 100644
--- a/test/inverse_hessian.jl
+++ b/test/inverse_hessian.jl
@@ -1,5 +1,3 @@
-using AbstractDifferentiation
-using ForwardDiff
 using LinearAlgebra
 using Optim
 using Pathfinder
@@ -52,8 +50,8 @@ end
     nocedal_wright_scaling(α, s, y) = fill!(similar(α), dot(y, s) / sum(abs2, y))
 
     θ₀ = 10 * randn(n)
-    ad_backend = AD.ForwardDiffBackend()
-    fun = Pathfinder.build_optim_function(logp; ad_backend)
+    ℓ = build_logdensityproblem(logp, n)
+    fun = Pathfinder.build_optim_function(ℓ)
     prob = Pathfinder.build_optim_problem(fun, θ₀)
     optimizer = Optim.LBFGS(;
         m=history_length, linesearch=Optim.LineSearches.MoreThuente()
     )
diff --git a/test/multipath.jl b/test/multipath.jl
index e6a7b19b..ace61956 100644
--- a/test/multipath.jl
+++ b/test/multipath.jl
@@ -1,15 +1,15 @@
-using AbstractDifferentiation
 using Distributions
 using ForwardDiff
 using LinearAlgebra
 using Optimization
 using Pathfinder
 using PSIS
-using ReverseDiff
 using SciMLBase
 using Test
 using Transducers
 
+include("test_utils.jl")
+
 @testset "multi path pathfinder" begin
     @testset "MvNormal" begin
         dim = 10
@@ -21,7 +21,7 @@ using Transducers
         μ = randn(dim)
         d = MvNormal(μ, Σ)
         logp(x) = logpdf(d, x)
-        ∇logp(x) = ForwardDiff.gradient(logp, x)
+        ℓ = build_logdensityproblem(logp, dim)
        rngs = if VERSION ≥ v"1.7"
            [MersenneTwister(), Random.default_rng()]
        else
            [MersenneTwister()]
        end

            Random.seed!(rng, seed)
            result = multipathfinder(
-                logp, ∇logp, ndraws; dim, nruns, ndraws_elbo, ndraws_per_run, rng, executor
+                ℓ, ndraws; nruns, ndraws_elbo, ndraws_per_run, rng, executor
             )
             @test result isa MultiPathfinderResult
-            @test result.input === (logp, ∇logp)
+            @test result.input === ℓ
             @test result.optim_fun isa SciMLBase.OptimizationFunction
             @test result.rng === rng
             @test result.optimizer ===
@@ -70,24 +70,15 @@ using Transducers
 
             Random.seed!(rng, seed)
             result2 = multipathfinder(
-                logp, ndraws; dim, nruns, ndraws_elbo, ndraws_per_run, rng, executor
+                ℓ, ndraws; nruns, ndraws_elbo, ndraws_per_run, rng, executor
             )
             @test result2.fit_distribution == result.fit_distribution
             @test result2.draws == result.draws
             @test result2.draw_component_ids == result.draw_component_ids
 
             Random.seed!(rng, seed)
-            ad_backend = AD.ReverseDiffBackend()
             result3 = multipathfinder(
-                logp,
-                ndraws;
-                dim,
-                nruns,
-                ndraws_elbo,
-                ndraws_per_run,
-                rng,
-                executor,
-                ad_backend,
+                ℓ, ndraws; nruns, ndraws_elbo, ndraws_per_run, rng, executor
             )
             for (c1, c2) in
                 zip(result.fit_distribution.components, result3.fit_distribution.components)
@@ -96,7 +87,7 @@ using Transducers
         end
 
         init = [randn(dim) for _ in 1:nruns]
-        result = multipathfinder(logp, ∇logp, ndraws; init)
+        result = multipathfinder(ℓ, ndraws; init)
         @test ncomponents(result.fit_distribution) == nruns
         @test size(result.draws) == (dim, ndraws)
     end
@@ -107,17 +98,10 @@ using Transducers
         fun = SciMLBase.OptimizationFunction(f, Optimization.AutoForwardDiff())
         @test_throws ArgumentError multipathfinder(fun, 10; init)
     end
-    @testset "errors if neither dim nor init valid" begin
-        logp(x) = -sum(abs2, x) / 2
-        nruns = 2
-        @test_throws ArgumentError multipathfinder(logp, 10; nruns)
-        @test_throws ArgumentError multipathfinder(logp, 10; nruns, dim=0)
-        multipathfinder(logp, 10; nruns, dim=2)
-        multipathfinder(logp, 10; init=[randn(2) for _ in 1:nruns])
-    end
     @testset "errors if neither init nor nruns valid" begin
         logp(x) = -sum(abs2, x) / 2
-        @test_throws ArgumentError multipathfinder(logp, 10; dim=5, nruns=0)
-        multipathfinder(logp, 10; dim=5, nruns=2)
+        ℓ = build_logdensityproblem(logp, 5)
+        @test_throws ArgumentError multipathfinder(ℓ, 10; nruns=0)
+        multipathfinder(ℓ, 10; nruns=2)
     end
 end
diff --git a/test/mvnormal.jl b/test/mvnormal.jl
index 7b25fc2c..5b892870 100644
--- a/test/mvnormal.jl
+++ b/test/mvnormal.jl
@@ -1,6 +1,4 @@
-using AbstractDifferentiation
 using Distributions
-using ForwardDiff
 using Optim
 using Pathfinder
 using Random
@@ -11,10 +9,9 @@ include("test_utils.jl")
 @testset "MvNormal functions" begin
     @testset "fit_mvnormals" begin
         n = 10
-        logp(x) = logp_banana(x)
+        ℓ = build_logdensityproblem(logp_banana, n)
         θ₀ = 10 * randn(n)
-        ad_backend = AD.ForwardDiffBackend()
-        fun = Pathfinder.build_optim_function(logp; ad_backend)
+        fun = Pathfinder.build_optim_function(ℓ)
         prob = Pathfinder.build_optim_problem(fun, θ₀)
         optimizer = Optim.LBFGS()
         history_length = optimizer.m
diff --git a/test/optimize.jl b/test/optimize.jl
index db71b7a9..e4785677 100644
--- a/test/optimize.jl
+++ b/test/optimize.jl
@@ -1,5 +1,3 @@
-using AbstractDifferentiation
-using ForwardDiff
 using LinearAlgebra
 using Optim
 using OptimizationNLopt
@@ -12,38 +10,25 @@ include("test_utils.jl")
 
 @testset "build_optim_function" begin
     n = 20
-    f(x) = logp_banana(x)
-    ∇f(x) = ForwardDiff.gradient(f, x)
-    ad_backend = AD.ForwardDiffBackend()
+    ℓ = build_logdensityproblem(logp_banana, n)
     x = randn(n)
-    funs = [
-        "user-provided gradient" => Pathfinder.build_optim_function(f, ∇f; ad_backend),
-        "automatic gradient" => Pathfinder.build_optim_function(f; ad_backend),
-    ]
-    @testset "$name" for (name, fun) in funs
-        @test fun isa SciMLBase.OptimizationFunction
-        @test SciMLBase.isinplace(fun)
-        @test fun.f(x, nothing) ≈ -f(x)
-        ∇fx = similar(x)
-        fun.grad(∇fx, x, nothing)
-        @test ∇fx ≈ -∇f(x)
-        H = similar(x, n, n)
-        fun.hess(H, x, nothing)
-        @test H ≈ -ForwardDiff.hessian(f, x)
-        Hv = similar(x)
-        v = randn(n)
-        fun.hv(Hv, x, v, nothing)
-        @test Hv ≈ H * v
-    end
+    fun = Pathfinder.build_optim_function(ℓ)
+    @test fun isa SciMLBase.OptimizationFunction
+    @test SciMLBase.isinplace(fun)
+    @test fun.f(x, nothing) ≈ -ℓ.logp(x)
+    ∇fx = similar(x)
+    fun.grad(∇fx, x, nothing)
+    @test ∇fx ≈ -ℓ.∇logp(x)
+    H = similar(x, n, n)
+    fun.hess(H, x, nothing)
+    @test H ≈ -ℓ.∇²logp(x)
 end
 
 @testset "build_optim_problem" begin
     n = 20
-    f(x) = logp_banana(x)
-    ∇f(x) = ForwardDiff.gradient(f, x)
-    ad_backend = AD.ForwardDiffBackend()
+    ℓ = build_logdensityproblem(logp_banana, n)
     x0 = randn(n)
-    fun = Pathfinder.build_optim_function(f; ad_backend)
+    fun = Pathfinder.build_optim_function(ℓ)
     prob = Pathfinder.build_optim_problem(fun, x0)
     @test SciMLBase.isinplace(prob)
     @test prob isa SciMLBase.OptimizationProblem
@@ -97,11 +82,10 @@ end
     P = inv(rand_pd_mat(Float64, n))
     μ = randn(n)
     f(x) = -dot(x - μ, P, x - μ) / 2
-    ∇f(x) = ForwardDiff.gradient(f, x)
+    ℓ = build_logdensityproblem(f, n)
     x0 = randn(n)
 
-    ad_backend = AD.ForwardDiffBackend()
-    fun = Pathfinder.build_optim_function(f; ad_backend)
+    fun = Pathfinder.build_optim_function(ℓ)
     prob = Pathfinder.build_optim_problem(fun, x0)
 
     optimizers = [
@@ -118,12 +102,12 @@ end
         @test optim_trace.points[end] ≈ μ
         @test optim_sol.u ≈ μ
         @test optim_trace.log_densities ≈ f.(optim_trace.points)
-        @test optim_trace.gradients ≈ ∇f.(optim_trace.points) atol = 1e-4
+        @test optim_trace.gradients ≈ ℓ.∇logp.(optim_trace.points) atol = 1e-4
 
         if !(optimizer isa NLopt.Opt)
             options = Optim.Options(; store_trace=true, extended_trace=true)
             res = Optim.optimize(
-                x -> -f(x), (y, x) -> copyto!(y, -∇f(x)), x0, optimizer, options
+                x -> -f(x), (y, x) -> copyto!(y, -ℓ.∇logp(x)), x0, optimizer, options
             )
             @test Optim.iterations(res) == length(optim_trace.points) - 1
             @test Optim.x_trace(res) ≈ optim_trace.points
diff --git a/test/singlepath.jl b/test/singlepath.jl
index 151e8486..f70a062f 100644
--- a/test/singlepath.jl
+++ b/test/singlepath.jl
@@ -1,4 +1,3 @@
-using AbstractDifferentiation
 using Distributions
 using ForwardDiff
 using LinearAlgebra
@@ -6,16 +5,16 @@ using Optim
 using Optimization
 using Pathfinder
 using Random
-using ReverseDiff
 using SciMLBase
 using Test
 using Transducers
 
+include("test_utils.jl")
+
 @testset "single path pathfinder" begin
     @testset "IsoNormal" begin
         # here pathfinder finds the exact solution after 1 iteration
         logp(x) = -sum(abs2, x) / 2
-        ∇logp(x) = -x
         ndraws = 100
         rngs = if VERSION ≥ v"1.7"
             [MersenneTwister(), Random.default_rng()]
         else
             [MersenneTwister()]
         end
         seed = 42
         @testset for dim in [1, 5, 10, 100], rng in rngs
             executor = rng isa MersenneTwister ? SequentialEx() : ThreadedEx()
-
+            ℓ = build_logdensityproblem(logp, dim)
             init = randn(dim)
             Random.seed!(rng, seed)
-            result = @inferred pathfinder(logp, ∇logp; init, ndraws, rng, executor)
+            result = @inferred pathfinder(ℓ; init, ndraws, rng, executor)
             @test result isa PathfinderResult
-            @test result.input === (logp, ∇logp)
+            @test result.input === ℓ
             @test result.optim_prob isa SciMLBase.OptimizationProblem
             @test result.logp(init) ≈ logp(init)
             @test result.rng === rng
@@ -56,14 +55,14 @@ using Transducers
                 argmax(getproperty.(result.elbo_estimates, :value))
 
             Random.seed!(rng, seed)
-            result2 = pathfinder(logp, ∇logp; init, ndraws, rng, executor)
+            result2 = pathfinder(ℓ; init, ndraws, rng, executor)
             @test result2.fit_iteration == result.fit_iteration
             @test result2.draws == result.draws
             @test getproperty.(result2.elbo_estimates, :value) ==
                 getproperty.(result.elbo_estimates, :value)
 
             ndraws = 2
-            result3 = pathfinder(logp, ∇logp; init, ndraws, executor)
+            result3 = pathfinder(ℓ; init, ndraws, executor)
             @test size(result3.draws) == (dim, ndraws)
         end
     end
@@ -79,31 +78,27 @@ using Transducers
         #! format: on
         P = inv(Symmetric(Σ))
         logp(x) = -dot(x, P, x) / 2
-        ∇logp(x) = -(P * x)
         dim = 5
-        ad_backend = AD.ReverseDiffBackend()
+        ℓ = build_logdensityproblem(logp, dim)
         ndraws_elbo = 100
         rngs = if VERSION ≥ v"1.7"
             [MersenneTwister(), Random.default_rng()]
         else
             [MersenneTwister()]
         end
+        x = randn(dim)
         seed = 38
         optimizer = Optim.LBFGS(; m=6)
         @testset for rng in rngs
             executor = rng isa MersenneTwister ? SequentialEx() : ThreadedEx()
             Random.seed!(rng, seed)
-            result = @inferred pathfinder(
-                logp; rng, dim, optimizer, ndraws_elbo, ad_backend, executor
-            )
-            @test result.input === logp
+            result = @inferred pathfinder(ℓ; rng, optimizer, ndraws_elbo, executor)
+            @test result.input === ℓ
             @test result.fit_distribution.Σ ≈ Σ rtol = 1e-1
             @test result.optimizer == optimizer
 
             Random.seed!(rng, seed)
-            result2 = pathfinder(
-                logp; rng, dim, optimizer, ndraws_elbo, ad_backend, executor
-            )
+            result2 = pathfinder(ℓ; rng, optimizer, ndraws_elbo, executor)
             @test result2.fit_distribution == result.fit_distribution
             @test result2.draws == result.draws
             @test getproperty.(result2.elbo_estimates, :value) ==
@@ -113,12 +108,12 @@ using Transducers
         Random.seed!(42)
         i = 0
         callback = (args...,) -> (i += 1; false)
-        pathfinder(logp; dim, callback)
+        pathfinder(ℓ; callback)
         @test i ≠ 6
 
         Random.seed!(42)
         i = 0
-        pathfinder(logp; dim, callback, maxiters=5)
+        pathfinder(ℓ; callback, maxiters=5)
         @test i == 6
     end
 end
@@ -127,16 +122,17 @@ end
         dim = 5
         nfail = 20
         logp(x) = i ≤ nfail ? NaN : -sum(abs2, x) / 2
+        ℓ = build_logdensityproblem(logp, dim)
         callback = (args...,) -> (i += 1; true)
         i = 1
-        result = pathfinder(logp; dim, callback)
+        result = pathfinder(ℓ; callback)
         @test result.fit_distribution.μ ≈ zeros(dim) atol = 1e-6
         @test result.fit_distribution.Σ ≈ diagm(ones(dim)) atol = 1e-6
         @test result.num_tries == nfail + 1
         @test result.optim_prob.u0 == result.optim_trace.points[1]
 
         i = 1
         init = randn(dim)
-        result2 = pathfinder(logp; init, callback, ntries=nfail)
+        result2 = pathfinder(ℓ; init, callback, ntries=nfail)
         @test !isapprox(result2.fit_distribution.μ, zeros(dim); atol=1e-6)
         @test result2.fit_iteration == 0
         @test isempty(result2.elbo_estimates)
@@ -172,8 +168,8 @@ using Transducers
     @testset "errors if neither dim nor init valid" begin
         logp(x) = -sum(abs2, x) / 2
         @test_throws ArgumentError pathfinder(logp)
-        @test_throws ArgumentError pathfinder(logp; dim=0)
-        pathfinder(logp; dim=3)
-        pathfinder(logp; init=randn(5))
+        @test_throws ArgumentError pathfinder(LogDensityFunction(logp, 3))
+        @test_throws ArgumentError pathfinder(build_logdensityproblem(logp, 0))
+        pathfinder(build_logdensityproblem(logp, 3))
     end
 end
diff --git a/test/test_utils.jl b/test/test_utils.jl
index 41fac220..d6d28187 100644
--- a/test/test_utils.jl
+++ b/test/test_utils.jl
@@ -1,4 +1,6 @@
+using ForwardDiff
 using LinearAlgebra
+using LogDensityProblems
 using Pathfinder
 using Random
 
@@ -32,3 +34,40 @@ function _fb(b, x)
     return -dot(y, inv(Σ), y) / 2
 end
 logp_banana(x) = _fb(0.03, x)
+
+struct LogDensityFunction{F}
+    logp::F
+    dim::Int
+end
+function LogDensityProblems.capabilities(::Type{<:LogDensityFunction})
+    return LogDensityProblems.LogDensityOrder{0}()
+end
+LogDensityProblems.dimension(ℓ::LogDensityFunction) = ℓ.dim
+LogDensityProblems.logdensity(ℓ::LogDensityFunction, x) = ℓ.logp(x)
+
+struct LogDensityFunctionWithGradHess{F,G,H}
+    logp::F
+    ∇logp::G
+    ∇²logp::H
+    dim::Int
+end
+function LogDensityProblems.capabilities(::Type{<:LogDensityFunctionWithGradHess})
+    return LogDensityProblems.LogDensityOrder{2}()
+end
+LogDensityProblems.dimension(ℓ::LogDensityFunctionWithGradHess) = ℓ.dim
+LogDensityProblems.logdensity(ℓ::LogDensityFunctionWithGradHess, x) = ℓ.logp(x)
+function LogDensityProblems.logdensity_and_gradient(ℓ::LogDensityFunctionWithGradHess, x)
+    return ℓ.logp(x), ℓ.∇logp(x)
+end
+function LogDensityProblems.logdensity_gradient_and_hessian(
+    ℓ::LogDensityFunctionWithGradHess, x
+)
+    return ℓ.logp(x), ℓ.∇logp(x), ℓ.∇²logp(x)
+end
+
+function build_logdensityproblem(f, n)
+    ∇f(x) = ForwardDiff.gradient(f, x)
+    Hf(x) = ForwardDiff.hessian(f, x)
+    ℓ = LogDensityFunctionWithGradHess(f, ∇f, Hf, n)
+    return ℓ
+end
diff --git a/test/transducers.jl b/test/transducers.jl
index 6feca9a5..cd78c8f8 100644
--- a/test/transducers.jl
+++ b/test/transducers.jl
@@ -4,23 +4,6 @@ using Test
 using Transducers
 
 @testset "transducers integration" begin
-    @testset "_default_executor" begin
-        rng = MersenneTwister(42)
-        @test Pathfinder._default_executor(MersenneTwister(42); basesize=2) ===
-            SequentialEx()
-        if VERSION ≥ v"1.7.0"
-            @test Pathfinder._default_executor(Random.GLOBAL_RNG; basesize=1) ===
-                PreferParallel(; basesize=1)
-            @test Pathfinder._default_executor(Random.default_rng(); basesize=1) ===
-                PreferParallel(; basesize=1)
-        else
-            @test Pathfinder._default_executor(Random.GLOBAL_RNG; basesize=1) ===
-                SequentialEx()
-            @test Pathfinder._default_executor(Random.default_rng(); basesize=1) ===
-                SequentialEx()
-        end
-    end
-
     @testset "_findmax" begin
         x = randn(100)
         @test Pathfinder._findmax(x) == findmax(x)
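
The diff above replaces the `logp`/`∇logp` and AbstractDifferentiation-based entry points with the LogDensityProblems.jl interface. The sketch below is a minimal usage example of that new API; the `StdNormalProblem` type and its values are illustrative only and are not part of this changeset, and the example assumes Pathfinder 0.7 together with LogDensityProblemsAD and ForwardDiff, as in the updated quickstart.

```julia
# Minimal sketch of the LogDensityProblems-based API introduced by this diff
# (assumes Pathfinder 0.7, LogDensityProblems 2, LogDensityProblemsAD, ForwardDiff).
using ForwardDiff, LogDensityProblems, LogDensityProblemsAD, Pathfinder

# A hypothetical standard-normal target, implemented as an order-0 log-density problem.
struct StdNormalProblem
    dim::Int
end
function LogDensityProblems.capabilities(::Type{<:StdNormalProblem})
    return LogDensityProblems.LogDensityOrder{0}()
end
LogDensityProblems.dimension(ℓ::StdNormalProblem) = ℓ.dim
LogDensityProblems.logdensity(::StdNormalProblem, x) = -sum(abs2, x) / 2

# Pathfinder requires gradient capability, so wrap the problem with an AD backend.
prob = ADgradient(:ForwardDiff, StdNormalProblem(5))

result = pathfinder(prob; init_scale=4)                # single-path run
result_multi = multipathfinder(prob, 1_000; nruns=8)   # multi-path run
```

Wrapping with `ADgradient` is what satisfies the gradient-capability check added in `_check_log_density_problem`; passing an order-0 problem directly would throw an `ArgumentError`.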