diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
index fee253ca..62209613 100644
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -20,7 +20,7 @@ jobs:
       fail-fast: false
       matrix:
         version:
-          - '1.6'
+          - 'lts'
           - '1'
           - 'pre'
         os:
@@ -68,7 +68,7 @@ jobs:
         include:
           - version: '1'
             downgrade: false
-          - version: '1.7'
+          - version: 'lts'
             downgrade: true
     steps:
       - uses: actions/checkout@v4
diff --git a/Project.toml b/Project.toml
index 4b017f91..7d36694b 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "Pathfinder"
 uuid = "b1d3bc72-d0e7-4279-b92f-7fa5d6d2d454"
 authors = ["Seth Axen and contributors"]
-version = "0.9.8"
+version = "0.9.9"

 [deps]
 ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b"
@@ -23,7 +23,6 @@ SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462"
 Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
 Transducers = "28d57a85-8fef-5791-bfe6-a80928e7c999"
-UnPack = "3a884ed6-31ef-47d7-9d2a-63182c4928ed"

 [weakdeps]
 Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
@@ -37,34 +36,34 @@ PathfinderDynamicHMCExt = "DynamicHMC"
 PathfinderTuringExt = ["Accessors", "DynamicPPL", "MCMCChains", "Turing"]

 [compat]
-ADTypes = "0.2, 1"
+ADTypes = "0.2.5, 1"
 Accessors = "0.1.12"
 Distributions = "0.25.87"
 DynamicHMC = "3.4.0"
 DynamicPPL = "0.25.2, 0.27, 0.28, 0.29, 0.30, 0.31, 0.32"
 Folds = "0.2.9"
-ForwardDiff = "0.10.19"
+ForwardDiff = "0.10.26"
 IrrationalConstants = "0.1.1, 0.2"
-LinearAlgebra = "1.6"
+LinearAlgebra = "1"
 LogDensityProblems = "2.1.0"
 LogDensityProblemsAD = "1.7.0"
 MCMCChains = "6.0.2"
 Optim = "1.7.2"
-Optimization = "3.16.0, 4"
-OptimizationOptimJL = "0.1.7, 0.2, 0.3, 0.4"
+Optimization = "3.21, 4"
+OptimizationNLopt = "0.2, 0.3"
+OptimizationOptimJL = "0.2.1, 0.3, 0.4"
 PDMats = "0.11.26"
 PSIS = "0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9"
 ProgressLogging = "0.1.4"
-Random = "1.6"
+Random = "1"
 Requires = "1.1"
-ReverseDiff = "1.4.5"
-SciMLBase = "1.95.0, 2"
-Statistics = "1.6"
-StatsBase = "0.33.7, 0.34"
+ReverseDiff = "1.15"
+SciMLBase = "2.30"
+Statistics = "1"
+StatsBase = "0.33.17, 0.34"
 Transducers = "0.4.81"
 Turing = "0.31.4, 0.32, 0.33, 0.34, 0.35"
-UnPack = "1"
-julia = "1.6"
+julia = "1.10"

 [extras]
 Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
diff --git a/docs/Project.toml b/docs/Project.toml
index 194eb4de..bd371380 100644
--- a/docs/Project.toml
+++ b/docs/Project.toml
@@ -19,19 +19,19 @@ TransformedLogDensities = "f9bc47f6-f3f8-4f3b-ab21-f8bc73906f26"
 Turing = "fce5fe82-541a-59a6-adf8-730c64b5f9a0"

 [compat]
-ADTypes = "0.2, 1"
+ADTypes = "0.2.5, 1"
 AdvancedHMC = "0.6"
 Documenter = "1"
 DocumenterCitations = "1.2"
 DocumenterInterLinks = "1"
 DynamicHMC = "3.4.0"
-ForwardDiff = "0.10.19"
+ForwardDiff = "0.10.26"
 LogDensityProblems = "2.1.0"
 LogDensityProblemsAD = "1.7"
 Pathfinder = "0.9"
-ReverseDiff = "1.4.5"
+ReverseDiff = "1.15"
 StatsFuns = "1"
-StatsPlots = "0.14.21, 0.15"
+StatsPlots = "0.15.2"
 TransformVariables = "0.6.2, 0.7, 0.8"
 TransformedLogDensities = "1.0.2"
 Turing = "0.31.4, 0.32, 0.33, 0.34, 0.35"
diff --git a/src/Pathfinder.jl b/src/Pathfinder.jl
index 5ba23927..5f3cf5dd 100644
--- a/src/Pathfinder.jl
+++ b/src/Pathfinder.jl
@@ -19,7 +19,6 @@ using SciMLBase: SciMLBase
 using Statistics: Statistics
 using StatsBase: StatsBase
 using Transducers: Transducers
-using UnPack: @unpack

 export PathfinderResult, MultiPathfinderResult
 export pathfinder, multipathfinder
diff --git a/src/optimize.jl b/src/optimize.jl
index 84e6677d..5fce392d 100644
--- a/src/optimize.jl
+++ b/src/optimize.jl
@@ -98,54 +98,28 @@ struct OptimizationCallback{X,FX,∇FX,ID,CB}
     fail_on_nonfinite::Bool
 end

-@static if isdefined(Optimization, :OptimizationState)
-    # Optimization v3.21.0 and later
-    function (cb::OptimizationCallback)(state::Optimization.OptimizationState, args...)
-        @unpack (
-            xs, fxs, ∇fxs, progress_name, progress_id, maxiters, callback, fail_on_nonfinite
-        ) = cb
-        ret = callback !== nothing && callback(state, args...)
-        iteration = state.iter
-        Base.@logmsg ProgressLogging.ProgressLevel progress_name progress =
-            iteration / maxiters _id = progress_id
-
-        x = copy(state.u)
-        fx = -state.objective
-        ∇fx = state.grad === nothing ? nothing : -state.grad
-
-        # some backends mutate x, so we must copy it
-        push!(xs, x)
-        push!(fxs, fx)
-        push!(∇fxs, ∇fx)
-
-        if fail_on_nonfinite && !ret
-            ret = (isnan(fx) || fx == Inf || (∇fx !== nothing && any(!isfinite, ∇fx)))::Bool
-        end
-
-        return ret
+function (cb::OptimizationCallback)(state::Optimization.OptimizationState, args...)
+    (; xs, fxs, ∇fxs, progress_name, progress_id, maxiters, callback, fail_on_nonfinite) =
+        cb
+    ret = callback !== nothing && callback(state, args...)
+    iteration = state.iter
+    Base.@logmsg ProgressLogging.ProgressLevel progress_name progress = iteration / maxiters _id =
+        progress_id
+
+    x = copy(state.u)
+    fx = -state.objective
+    ∇fx = state.grad === nothing ? nothing : -state.grad
+
+    # some backends mutate x, so we must copy it
+    push!(xs, x)
+    push!(fxs, fx)
+    push!(∇fxs, ∇fx)
+
+    if fail_on_nonfinite && !ret
+        ret = (isnan(fx) || fx == Inf || (∇fx !== nothing && any(!isfinite, ∇fx)))::Bool
     end
-else
-    # Optimization v3.20.X and earlier
-    function (cb::OptimizationCallback)(x, nfx, args...)
-        @unpack (
-            xs, fxs, ∇fxs, progress_name, progress_id, maxiters, callback, fail_on_nonfinite
-        ) = cb
-        ret = callback !== nothing && callback(x, nfx, args...)
-        iteration = length(cb.xs)
-        Base.@logmsg ProgressLogging.ProgressLevel progress_name progress =
-            iteration / maxiters _id = progress_id
-        # some backends mutate x, so we must copy it
-        push!(xs, copy(x))
-        push!(fxs, -nfx)
-        push!(∇fxs, nothing)
-
-        if fail_on_nonfinite && !ret
-            ret = (isnan(nfx) || nfx == -Inf)::Bool
-        end
-
-        return ret
-    end
+    return ret
 end

 struct OptimizationTrace{P,L}
diff --git a/src/singlepath.jl b/src/singlepath.jl
index a8bb7f27..58944c84 100644
--- a/src/singlepath.jl
+++ b/src/singlepath.jl
@@ -202,7 +202,7 @@ function pathfinder(
             kwargs...,
         )
     end
-    @unpack (
+    (;
         itry,
         success,
         optim_prob,
diff --git a/src/woodbury.jl b/src/woodbury.jl
index 3da91cde..4579ce7d 100644
--- a/src/woodbury.jl
+++ b/src/woodbury.jl
@@ -292,7 +292,7 @@ function Base.AbstractMatrix{T}(W::WoodburyPDMat) where {T}
     F = pdfactorize(W)
     Fnew = WoodburyPDFactorization(
         convert(AbstractMatrix{T}, F.U),
-        convert(AbstractMatrix{T}, F.Q),
+        convert(LinearAlgebra.AbstractQ{T}, F.Q),
         convert(AbstractMatrix{T}, F.V),
     )
     return WoodburyPDMat(
diff --git a/test/elbo.jl b/test/elbo.jl
index 0d24e210..4faf2f7f 100644
--- a/test/elbo.jl
+++ b/test/elbo.jl
@@ -34,11 +34,7 @@ using Transducers
     logp(x) = logpdf(target_dist, x[1])
     σs = [1e-3, 0.05, σ_target, 1.0, 1.1, 1.2, 5.0, 10.0]
     dists = Normal.(0, σs)
-    if VERSION ≥ v"1.7.0"
-        executors = [SequentialEx(), ThreadedEx()]
-    else
-        executors = [SequentialEx()]
-    end
+    executors = [SequentialEx(), ThreadedEx()]
     @testset "$executor" for executor in executors
         rng = Random.seed!(Random.default_rng(), 42)
         lopt, estimates = @inferred Pathfinder.maximize_elbo(
diff --git a/test/integration/AdvancedHMC/Project.toml b/test/integration/AdvancedHMC/Project.toml
index d90ca4f6..0665164d 100644
--- a/test/integration/AdvancedHMC/Project.toml
+++ b/test/integration/AdvancedHMC/Project.toml
@@ -18,7 +18,7 @@ TransformedLogDensities = "f9bc47f6-f3f8-4f3b-ab21-f8bc73906f26"
 [compat]
 AdvancedHMC = "0.6"
 Distributions = "0.25.87"
-ForwardDiff = "0.10.19"
+ForwardDiff = "0.10.26"
 LogDensityProblems = "2.1.0"
 LogDensityProblemsAD = "1.7"
 MCMCDiagnosticTools = "0.3"
@@ -28,4 +28,4 @@ Statistics = "1"
 StatsFuns = "1"
 TransformVariables = "0.6.2, 0.7, 0.8"
 TransformedLogDensities = "1.0.2"
-julia = "1.6"
+julia = "1.10"
diff --git a/test/integration/DynamicHMC/Project.toml b/test/integration/DynamicHMC/Project.toml
index bb77e6c2..adac8366 100644
--- a/test/integration/DynamicHMC/Project.toml
+++ b/test/integration/DynamicHMC/Project.toml
@@ -24,4 +24,4 @@ Statistics = "1"
 StatsFuns = "1"
 TransformVariables = "0.6.2, 0.7, 0.8"
 TransformedLogDensities = "1.0.2"
-julia = "1.6"
+julia = "1.10"
diff --git a/test/integration/Turing/Project.toml b/test/integration/Turing/Project.toml
index 1938c556..2c8158cd 100644
--- a/test/integration/Turing/Project.toml
+++ b/test/integration/Turing/Project.toml
@@ -10,4 +10,4 @@ Turing = "fce5fe82-541a-59a6-adf8-730c64b5f9a0"
 LogDensityProblems = "2.1.0"
 Pathfinder = "0.9"
 Turing = "0.31.4, 0.32, 0.33, 0.34, 0.35"
-julia = "1.6"
+julia = "1.10"
diff --git a/test/multipath.jl b/test/multipath.jl
index 42ab3c93..7088be77 100644
--- a/test/multipath.jl
+++ b/test/multipath.jl
@@ -22,11 +22,7 @@ using Transducers
     d = MvNormal(μ, Σ)
     logp(x) = logpdf(d, x)
     ℓ = build_logdensityproblem(logp, dim, 2)
-    rngs = if VERSION ≥ v"1.7"
-        [MersenneTwister(), Random.default_rng()]
-    else
-        [MersenneTwister()]
-    end
+    rngs = [MersenneTwister(), Random.default_rng()]
     seed = 76
     @testset for rng in rngs
         executor = rng isa MersenneTwister ? SequentialEx() : ThreadedEx()
diff --git a/test/optimize.jl b/test/optimize.jl
index 40a7eb49..9d882781 100644
--- a/test/optimize.jl
+++ b/test/optimize.jl
@@ -73,18 +73,13 @@ end
                 (isdefined(Optimization, :OptimizationState) && !isfinite(gval))
             )
         )
-        if isdefined(Optimization, :OptimizationState)
-            # Optimization v3.21.0 and later
-            callback = (state, args...) -> cbfail
-            state = Optimization.OptimizationState(;
-                iter=0, u=x, objective=-fval, grad=-∇f(x)
-            )
-            cb_args = (state, -fval)
-        else
-            # Optimization v3.20.X and earlier
-            callback = (x, fx, args...) -> cbfail
-            cb_args = (x, -fval)
-        end
+
+        callback = (state, args...) -> cbfail
+        state = Optimization.OptimizationState(;
+            iter=0, u=x, objective=-fval, grad=-∇f(x)
+        )
+        cb_args = (state, -fval)
+
         cb = Pathfinder.OptimizationCallback(
             xs,
             fxs,
diff --git a/test/singlepath.jl b/test/singlepath.jl
index 0e21ec69..aa03fcea 100644
--- a/test/singlepath.jl
+++ b/test/singlepath.jl
@@ -16,11 +16,7 @@ using Transducers
     # here pathfinder finds the exact solution after 1 iteration
    logp(x) = -sum(abs2, x) / 2
    ndraws = 100
-    rngs = if VERSION ≥ v"1.7"
-        [MersenneTwister(), Random.default_rng()]
-    else
-        [MersenneTwister()]
-    end
+    rngs = [MersenneTwister(), Random.default_rng()]
     seed = 42
     @testset for dim in [1, 5, 10, 100], rng in rngs
         executor = rng isa MersenneTwister ? SequentialEx() : ThreadedEx()
@@ -83,11 +79,7 @@ using Transducers
     dim = 5
     ℓ = build_logdensityproblem(logp, dim, 2)
     ndraws_elbo = 100
-    rngs = if VERSION ≥ v"1.7"
-        [MersenneTwister(), Random.default_rng()]
-    else
-        [MersenneTwister()]
-    end
+    rngs = [MersenneTwister(), Random.default_rng()]
     x = randn(dim)
     seed = 38
     optimizer = Optim.LBFGS(; m=6)
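
Note: the `@unpack` removals above rely on Julia's built-in property destructuring `(; a, b) = x`, available since Julia 1.7 and therefore covered by the raised `julia = "1.10"` lower bound. A minimal sketch of the equivalence follows; the `Config` struct is hypothetical and only for illustration.

struct Config
    xs::Vector{Float64}
    maxiters::Int
end

cfg = Config([1.0, 2.0], 100)

# Old form, requiring the UnPack dependency:
#     @unpack xs, maxiters = cfg
# New form, Base syntax with no extra dependency:
(; xs, maxiters) = cfg
@assert xs === cfg.xs && maxiters == 100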