diff --git a/Project.toml b/Project.toml
index 44f18d17b..05d5bafcf 100644
--- a/Project.toml
+++ b/Project.toml
@@ -32,7 +32,6 @@ TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
 
 [weakdeps]
 BandedMatrices = "aae01518-5342-5314-be14-df237901396f"
-Enlsip = "d5306a6b-d590-428d-a53a-eb3bb2d36f2d"
 FastLevenbergMarquardt = "7a0df574-e128-4d35-8cbd-3d84502bf7ce"
 FixedPointAcceleration = "817d07cb-a79a-5c30-9a31-890123675176"
 LeastSquaresOptim = "0fc2ff8b-aaa3-5acd-a817-1944a5e08891"
@@ -46,7 +45,6 @@ Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
 
 [extensions]
 NonlinearSolveBandedMatricesExt = "BandedMatrices"
-NonlinearSolveEnlsipExt = "Enlsip"
 NonlinearSolveFastLevenbergMarquardtExt = "FastLevenbergMarquardt"
 NonlinearSolveFixedPointAccelerationExt = "FixedPointAcceleration"
 NonlinearSolveLeastSquaresOptimExt = "LeastSquaresOptim"
@@ -67,7 +65,6 @@ BenchmarkTools = "1.4"
 CUDA = "5.2"
 ConcreteStructs = "0.2.3"
 DiffEqBase = "6.149.0"
-Enlsip = "0.9"
 Enzyme = "0.12"
 ExplicitImports = "1.4.4"
 FastBroadcast = "0.2.8"
@@ -119,7 +116,6 @@ Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
 BandedMatrices = "aae01518-5342-5314-be14-df237901396f"
 BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
 CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
-Enlsip = "d5306a6b-d590-428d-a53a-eb3bb2d36f2d"
 Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9"
 ExplicitImports = "7d51a73a-1435-4ff3-83d9-f097790105c7"
 FastLevenbergMarquardt = "7a0df574-e128-4d35-8cbd-3d84502bf7ce"
@@ -145,4 +141,4 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
 
 [targets]
-test = ["Aqua", "BandedMatrices", "BenchmarkTools", "CUDA", "Enlsip", "Enzyme", "ExplicitImports", "FastLevenbergMarquardt", "FixedPointAcceleration", "LeastSquaresOptim", "MINPACK", "ModelingToolkit", "NLSolvers", "NLsolve", "NaNMath", "NonlinearProblemLibrary", "OrdinaryDiffEq", "Pkg", "Random", "ReTestItems", "SIAMFANLEquations", "SpeedMapping", "StableRNGs", "StaticArrays", "Sundials", "Symbolics", "Test", "Zygote"]
+test = ["Aqua", "BandedMatrices", "BenchmarkTools", "CUDA", "Enzyme", "ExplicitImports", "FastLevenbergMarquardt", "FixedPointAcceleration", "LeastSquaresOptim", "MINPACK", "ModelingToolkit", "NLSolvers", "NLsolve", "NaNMath", "NonlinearProblemLibrary", "OrdinaryDiffEq", "Pkg", "Random", "ReTestItems", "SIAMFANLEquations", "SpeedMapping", "StableRNGs", "StaticArrays", "Sundials", "Symbolics", "Test", "Zygote"]
diff --git a/docs/src/basics/nonlinear_solution.md b/docs/src/basics/nonlinear_solution.md
index ce1abcc4c..6b74368fb 100644
--- a/docs/src/basics/nonlinear_solution.md
+++ b/docs/src/basics/nonlinear_solution.md
@@ -9,7 +9,6 @@ SciMLBase.NonlinearSolution
 
 ```@docs
 SciMLBase.NLStats
-NonlinearSolve.ImmutableNLStats
 ```
 
 ## Return Code
diff --git a/src/NonlinearSolve.jl b/src/NonlinearSolve.jl
index b92aac597..39e71d3d6 100644
--- a/src/NonlinearSolve.jl
+++ b/src/NonlinearSolve.jl
@@ -34,13 +34,14 @@ using PrecompileTools: @recompile_invalidations, @compile_workload, @setup_workl
     istril, istriu, lu, mul!, norm, pinv, tril!, triu!
 using LineSearches: LineSearches
 using LinearSolve: LinearSolve, LUFactorization, QRFactorization, ComposePreconditioner,
-    InvPreconditioner, needs_concrete_A
+    InvPreconditioner, needs_concrete_A, AbstractFactorization,
+    DefaultAlgorithmChoice, DefaultLinearSolver
 using MaybeInplace: @bb
 using Printf: @printf
 using Preferences: Preferences, @load_preference, @set_preferences!
 using RecursiveArrayTools: recursivecopy!, recursivefill!
 using SciMLBase: AbstractNonlinearAlgorithm, JacobianWrapper, AbstractNonlinearProblem,
-    AbstractSciMLOperator, _unwrap_val, has_jac, isinplace
+    AbstractSciMLOperator, _unwrap_val, has_jac, isinplace, NLStats
 using SparseArrays: AbstractSparseMatrix, SparseMatrixCSC
 using SparseDiffTools: SparseDiffTools, AbstractSparsityDetection,
     ApproximateJacobianSparsity, JacPrototypeSparsityDetection,
diff --git a/src/abstract_types.jl b/src/abstract_types.jl
index dc7b638a1..0dc4d8f5a 100644
--- a/src/abstract_types.jl
+++ b/src/abstract_types.jl
@@ -142,7 +142,6 @@ abstract type AbstractNonlinearSolveLineSearchCache end
 
 function reinit_cache!(
         cache::AbstractNonlinearSolveLineSearchCache, args...; p = cache.p, kwargs...)
-    cache.nf[] = 0
     cache.p = p
 end
 
@@ -235,7 +234,7 @@ function __show_cache(io::IO, cache::AbstractNonlinearSolveCache, indent = 0)
     println(io, (" "^(indent + 4)) * "u = ", get_u(cache), ",")
     println(io, (" "^(indent + 4)) * "residual = ", get_fu(cache), ",")
     println(io, (" "^(indent + 4)) * "inf-norm(residual) = ", norm(get_fu(cache), Inf), ",")
-    println(io, " "^(indent + 4) * "nsteps = ", get_nsteps(cache), ",")
+    println(io, " "^(indent + 4) * "nsteps = ", cache.stats.nsteps, ",")
     println(io, " "^(indent + 4) * "retcode = ", cache.retcode)
     print(io, " "^(indent) * ")")
 end
diff --git a/src/core/approximate_jacobian.jl b/src/core/approximate_jacobian.jl
index 4328af345..e61f34032 100644
--- a/src/core/approximate_jacobian.jl
+++ b/src/core/approximate_jacobian.jl
@@ -95,7 +95,7 @@ end
     inv_workspace
 
     # Counters
-    nf::Int
+    stats::NLStats
     nsteps::Int
     nresets::Int
     max_resets::Int
@@ -131,7 +131,7 @@ function __reinit_internal!(cache::ApproximateJacobianSolveCache{INV, GB, iip},
     end
 
     cache.p = p
-    cache.nf = 1
+    __reinit_internal!(cache.stats)
    cache.nsteps = 0
     cache.nresets = 0
     cache.steps_since_last_reset = 0
@@ -151,8 +151,9 @@ end
 
 function SciMLBase.__init(
         prob::AbstractNonlinearProblem{uType, iip}, alg::ApproximateJacobianSolveAlgorithm,
-        args...; alias_u0 = false, maxtime = nothing, maxiters = 1000, abstol = nothing,
-        reltol = nothing, linsolve_kwargs = (;), termination_condition = nothing,
+        args...; stats = empty_nlstats(), alias_u0 = false, maxtime = nothing,
+        maxiters = 1000, abstol = nothing, reltol = nothing,
+        linsolve_kwargs = (;), termination_condition = nothing,
         internalnorm::F = DEFAULT_NORM, kwargs...) where {uType, iip, F}
     timer = get_timer_output()
     @static_timeit timer "cache construction" begin
@@ -164,8 +165,8 @@ function SciMLBase.__init(
 
         INV = store_inverse_jacobian(alg.update_rule)
         linsolve = get_linear_solver(alg.descent)
-        initialization_cache = __internal_init(
-            prob, alg.initialization, alg, f, fu, u, p; linsolve, maxiters, internalnorm)
+        initialization_cache = __internal_init(prob, alg.initialization, alg, f, fu, u, p;
+            stats, linsolve, maxiters, internalnorm)
 
         abstol, reltol, termination_cache = init_termination_cache(
             prob, abstol, reltol, fu, u, termination_condition)
@@ -173,9 +174,8 @@ function SciMLBase.__init(
 
         J = initialization_cache(nothing)
         inv_workspace, J = INV ? __safe_inv_workspace(J) : (nothing, J)
-        descent_cache = __internal_init(
-            prob, alg.descent, J, fu, u; abstol, reltol, internalnorm,
-            linsolve_kwargs, pre_inverted = Val(INV), timer)
+        descent_cache = __internal_init(prob, alg.descent, J, fu, u; stats, abstol, reltol,
+            internalnorm, linsolve_kwargs, pre_inverted = Val(INV), timer)
         du = get_du(descent_cache)
 
         reinit_rule_cache = __internal_init(alg.reinit_rule, J, fu, u, du)
@@ -192,7 +192,7 @@ function SciMLBase.__init(
             supports_trust_region(alg.descent) || error("Trust Region not supported by \
                                                          $(alg.descent).")
             trustregion_cache = __internal_init(
-                prob, alg.trustregion, f, fu, u, p; internalnorm, kwargs...)
+                prob, alg.trustregion, f, fu, u, p; stats, internalnorm, kwargs...)
             GB = :TrustRegion
         end
 
@@ -200,12 +200,12 @@ function SciMLBase.__init(
             supports_line_search(alg.descent) || error("Line Search not supported by \
                                                         $(alg.descent).")
             linesearch_cache = __internal_init(
-                prob, alg.linesearch, f, fu, u, p; internalnorm, kwargs...)
+                prob, alg.linesearch, f, fu, u, p; stats, internalnorm, kwargs...)
             GB = :LineSearch
         end
 
         update_rule_cache = __internal_init(
-            prob, alg.update_rule, J, fu, u, du; internalnorm)
+            prob, alg.update_rule, J, fu, u, du; stats, internalnorm)
 
         trace = init_nonlinearsolve_trace(prob, alg, u, fu, ApplyArray(__zero, J), du;
             uses_jacobian_inverse = Val(INV), kwargs...)
@@ -213,7 +213,7 @@ function SciMLBase.__init(
         return ApproximateJacobianSolveCache{INV, GB, iip, maxtime !== nothing}(
             fu, u, u_cache, p, du, J, alg, prob, initialization_cache,
             descent_cache, linesearch_cache, trustregion_cache, update_rule_cache,
-            reinit_rule_cache, inv_workspace, 0, 0, 0, alg.max_resets,
+            reinit_rule_cache, inv_workspace, stats, 0, 0, alg.max_resets,
             maxiters, maxtime, alg.max_shrink_times, 0, timer, 0.0,
             termination_cache, trace, ReturnCode.Default, false, false, kwargs)
     end
@@ -223,7 +223,7 @@ function __step!(cache::ApproximateJacobianSolveCache{INV, GB, iip};
         recompute_jacobian::Union{Nothing, Bool} = nothing) where {INV, GB, iip}
     new_jacobian = true
     @static_timeit cache.timer "jacobian init/reinit" begin
-        if get_nsteps(cache) == 0 # First Step is special ignore kwargs
+        if cache.nsteps == 0 # First Step is special ignore kwargs
             J_init = __internal_solve!(
                 cache.initialization_cache, cache.fu, cache.u, Val(false))
             if INV
diff --git a/src/core/generalized_first_order.jl b/src/core/generalized_first_order.jl
index c16cf043d..84e74478f 100644
--- a/src/core/generalized_first_order.jl
+++ b/src/core/generalized_first_order.jl
@@ -97,7 +97,7 @@ concrete_jac(::GeneralizedFirstOrderAlgorithm{CJ}) where {CJ} = CJ
     trustregion_cache
 
     # Counters
-    nf::Int
+    stats::NLStats
     nsteps::Int
     maxiters::Int
     maxtime
@@ -135,7 +135,7 @@ function __reinit_internal!(
     end
 
     cache.p = p
-    cache.nf = 1
+    __reinit_internal!(cache.stats)
     cache.nsteps = 0
     cache.maxiters = maxiters
     cache.maxtime = maxtime
@@ -153,9 +153,10 @@ end
 
 function SciMLBase.__init(
         prob::AbstractNonlinearProblem{uType, iip}, alg::GeneralizedFirstOrderAlgorithm,
-        args...; alias_u0 = false, maxiters = 1000, abstol = nothing,
-        reltol = nothing, maxtime = nothing, termination_condition = nothing,
-        internalnorm = DEFAULT_NORM, linsolve_kwargs = (;), kwargs...) where {uType, iip}
+        args...; stats = empty_nlstats(), alias_u0 = false, maxiters = 1000,
+        abstol = nothing, reltol = nothing, maxtime = nothing,
+        termination_condition = nothing, internalnorm = DEFAULT_NORM,
+        linsolve_kwargs = (;), kwargs...) where {uType, iip}
     timer = get_timer_output()
     @static_timeit timer "cache construction" begin
         (; f, u0, p) = prob
@@ -170,11 +171,11 @@ function SciMLBase.__init(
         linsolve_kwargs = merge((; abstol, reltol), linsolve_kwargs)
 
         jac_cache = JacobianCache(
-            prob, alg, f, fu, u, p; autodiff = alg.jacobian_ad, linsolve,
+            prob, alg, f, fu, u, p; stats, autodiff = alg.jacobian_ad, linsolve,
             jvp_autodiff = alg.forward_ad, vjp_autodiff = alg.reverse_ad)
         J = jac_cache(nothing)
-        descent_cache = __internal_init(prob, alg.descent, J, fu, u; abstol, reltol,
-            internalnorm, linsolve_kwargs, timer)
+        descent_cache = __internal_init(prob, alg.descent, J, fu, u; stats, abstol,
+            reltol, internalnorm, linsolve_kwargs, timer)
         du = get_du(descent_cache)
 
         if alg.trustregion !== missing && alg.linesearch !== missing
@@ -189,7 +190,7 @@ function SciMLBase.__init(
             supports_trust_region(alg.descent) || error("Trust Region not supported by \
                                                          $(alg.descent).")
             trustregion_cache = __internal_init(
-                prob, alg.trustregion, f, fu, u, p; internalnorm, kwargs...)
+                prob, alg.trustregion, f, fu, u, p; stats, internalnorm, kwargs...)
             GB = :TrustRegion
         end
 
@@ -197,7 +198,7 @@ function SciMLBase.__init(
             supports_line_search(alg.descent) || error("Line Search not supported by \
                                                         $(alg.descent).")
             linesearch_cache = __internal_init(
-                prob, alg.linesearch, f, fu, u, p; internalnorm, kwargs...)
+                prob, alg.linesearch, f, fu, u, p; stats, internalnorm, kwargs...)
             GB = :LineSearch
         end
 
@@ -206,7 +207,7 @@ function SciMLBase.__init(
 
         return GeneralizedFirstOrderAlgorithmCache{iip, GB, maxtime !== nothing}(
             fu, u, u_cache, p, du, J, alg, prob, jac_cache, descent_cache, linesearch_cache,
-            trustregion_cache, 0, 0, maxiters, maxtime, alg.max_shrink_times, timer,
+            trustregion_cache, stats, 0, maxiters, maxtime, alg.max_shrink_times, timer,
             0.0, true, termination_cache, trace, ReturnCode.Default, false, kwargs)
     end
 end
diff --git a/src/core/generic.jl b/src/core/generic.jl
index 75d494730..44e6e1152 100644
--- a/src/core/generic.jl
+++ b/src/core/generic.jl
@@ -1,11 +1,11 @@
 function SciMLBase.__solve(prob::Union{NonlinearProblem, NonlinearLeastSquaresProblem},
-        alg::AbstractNonlinearSolveAlgorithm, args...; kwargs...)
-    cache = init(prob, alg, args...; kwargs...)
+        alg::AbstractNonlinearSolveAlgorithm, args...; stats = empty_nlstats(), kwargs...)
+    cache = SciMLBase.__init(prob, alg, args...; stats, kwargs...)
     return solve!(cache)
 end
 
 function not_terminated(cache::AbstractNonlinearSolveCache)
-    return !cache.force_stop && get_nsteps(cache) < cache.maxiters
+    return !cache.force_stop && cache.nsteps < cache.maxiters
 end
 
 function SciMLBase.solve!(cache::AbstractNonlinearSolveCache)
@@ -16,21 +16,16 @@ function SciMLBase.solve!(cache::AbstractNonlinearSolveCache)
     # The solver might have set a different `retcode`
     if cache.retcode == ReturnCode.Default
         cache.retcode = ifelse(
-            get_nsteps(cache) ≥ cache.maxiters, ReturnCode.MaxIters, ReturnCode.Success)
+            cache.nsteps ≥ cache.maxiters, ReturnCode.MaxIters, ReturnCode.Success)
     end
 
     update_from_termination_cache!(cache.termination_cache, cache)
 
-    update_trace!(cache.trace, get_nsteps(cache), get_u(cache),
+    update_trace!(cache.trace, cache.nsteps, get_u(cache),
         get_fu(cache), nothing, nothing, nothing; last = True)
 
     return SciMLBase.build_solution(cache.prob, cache.alg, get_u(cache), get_fu(cache);
-        cache.retcode, stats = __compile_stats(cache), cache.trace)
-end
-
-function __compile_stats(cache::AbstractNonlinearSolveCache)
-    return ImmutableNLStats(get_nf(cache), get_njacs(cache), get_nfactors(cache),
-        get_nsolve(cache), get_nsteps(cache))
+        cache.retcode, cache.stats, cache.trace)
 end
 
 """
@@ -55,7 +50,8 @@ function SciMLBase.step!(cache::AbstractNonlinearSolveCache{iip, timeit},
         __step!(cache, args...; kwargs...)
     end
 
-    hasfield(typeof(cache), :nsteps) && (cache.nsteps += 1)
+    cache.stats.nsteps += 1
+    cache.nsteps += 1
 
     if timeit
         cache.total_time += time() - time_start
diff --git a/src/core/spectral_methods.jl b/src/core/spectral_methods.jl
index 3d9a03e50..85e58c1a3 100644
--- a/src/core/spectral_methods.jl
+++ b/src/core/spectral_methods.jl
@@ -60,7 +60,7 @@ concrete_jac(::GeneralizedDFSane) = nothing
     linesearch_cache
 
     # Counters
-    nf::Int
+    stats::NLStats
     nsteps::Int
     maxiters::Int
    maxtime
@@ -106,7 +106,7 @@ function __reinit_internal!(
     reset!(cache.trace)
     reinit!(cache.termination_cache, get_fu(cache), get_u(cache); kwargs...)
 
-    cache.nf = 1
+    __reinit_internal!(cache.stats)
     cache.nsteps = 0
     cache.maxiters = maxiters
     cache.maxtime = maxtime
@@ -116,9 +116,9 @@ end
 
 @internal_caches GeneralizedDFSaneCache :linesearch_cache
 
-function SciMLBase.__init(prob::AbstractNonlinearProblem, alg::GeneralizedDFSane,
-        args...; alias_u0 = false, maxiters = 1000, abstol = nothing,
-        reltol = nothing, termination_condition = nothing,
+function SciMLBase.__init(prob::AbstractNonlinearProblem, alg::GeneralizedDFSane, args...;
+        stats = empty_nlstats(), alias_u0 = false, maxiters = 1000,
+        abstol = nothing, reltol = nothing, termination_condition = nothing,
         internalnorm::F = DEFAULT_NORM, maxtime = nothing, kwargs...) where {F}
     timer = get_timer_output()
     @static_timeit timer "cache construction" begin
@@ -130,8 +130,8 @@ function SciMLBase.__init(prob::AbstractNonlinearProblem, alg::GeneralizedDFSane
         fu = evaluate_f(prob, u)
         @bb fu_cache = copy(fu)
 
-        linesearch_cache = __internal_init(
-            prob, alg.linesearch, prob.f, fu, u, prob.p; maxiters, internalnorm, kwargs...)
+        linesearch_cache = __internal_init(prob, alg.linesearch, prob.f, fu, u, prob.p;
+            stats, maxiters, internalnorm, kwargs...)
 
         abstol, reltol, tc_cache = init_termination_cache(
             prob, abstol, reltol, fu, u_cache, termination_condition)
@@ -150,7 +150,7 @@ function SciMLBase.__init(prob::AbstractNonlinearProblem, alg::GeneralizedDFSane
 
         return GeneralizedDFSaneCache{isinplace(prob), maxtime !== nothing}(
             fu, fu_cache, u, u_cache, prob.p, du, alg, prob, σ_n, T(alg.σ_min),
-            T(alg.σ_max), linesearch_cache, 0, 0, maxiters, maxtime,
+            T(alg.σ_max), linesearch_cache, stats, 0, maxiters, maxtime,
             timer, 0.0, tc_cache, trace, ReturnCode.Default, false, kwargs)
     end
 end
diff --git a/src/default.jl b/src/default.jl
index fa8f1f786..660caab41 100644
--- a/src/default.jl
+++ b/src/default.jl
@@ -59,6 +59,7 @@ end
     best::Int
     current::Int
     nsteps::Int
+    stats::NLStats
     total_time::Float64
     maxtime
     retcode::ReturnCode.T
@@ -90,6 +91,7 @@ end
 function reinit_cache!(cache::NonlinearSolvePolyAlgorithmCache, args...; kwargs...)
     foreach(c -> reinit_cache!(c, args...; kwargs...), cache.caches)
     cache.current = cache.alg.start_index
+    __reinit_internal!(cache.stats)
     cache.nsteps = 0
     cache.total_time = 0.0
 end
@@ -98,8 +100,8 @@ for (probType, pType) in ((:NonlinearProblem, :NLS), (:NonlinearLeastSquaresProb
     algType = NonlinearSolvePolyAlgorithm{pType}
     @eval begin
         function SciMLBase.__init(
-                prob::$probType, alg::$algType{N}, args...; maxtime = nothing,
-                maxiters = 1000, internalnorm = DEFAULT_NORM,
+                prob::$probType, alg::$algType{N}, args...; stats = empty_nlstats(),
+                maxtime = nothing, maxiters = 1000, internalnorm = DEFAULT_NORM,
                 alias_u0 = false, verbose = true, kwargs...) where {N}
             if (alias_u0 && !ismutable(prob.u0))
                 verbose && @warn "`alias_u0` has been set to `true`, but `u0` is \
@@ -115,13 +117,14 @@ for (probType, pType) in ((:NonlinearProblem, :NLS), (:NonlinearLeastSquaresProb
             alias_u0 && (prob = remake(prob; u0 = u0_aliased))
             return NonlinearSolvePolyAlgorithmCache{isinplace(prob), N, maxtime !== nothing}(
                 map(
-                    solver -> SciMLBase.__init(prob, solver, args...; maxtime,
+                    solver -> SciMLBase.__init(prob, solver, args...; stats, maxtime,
                         internalnorm, alias_u0, verbose, kwargs...),
                     alg.algs),
                 alg,
                 -1,
                 alg.start_index,
                 0,
+                stats,
                 0.0,
                 maxtime,
                 ReturnCode.Default,
@@ -181,7 +184,6 @@ end
     push!(calls, quote
        fus = tuple($(Tuple(resids)...))
        minfu, idx = __findmin(cache.internalnorm, fus)
-       stats = __compile_stats(cache.caches[idx])
    end)
    for i in 1:N
        push!(calls, quote
@@ -203,7 +205,7 @@ end
        end
        return __build_solution_less_specialize(
            cache.caches[idx].prob, cache.alg, u, fus[idx];
-           retcode, stats, cache.caches[idx].trace)
+           retcode, stats = cache.stats, cache.caches[idx].trace)
    end)
 
    return Expr(:block, calls...)
@@ -250,7 +252,8 @@ end
 for (probType, pType) in ((:NonlinearProblem, :NLS), (:NonlinearLeastSquaresProblem, :NLLS))
     algType = NonlinearSolvePolyAlgorithm{pType}
     @eval begin
-        @generated function SciMLBase.__solve(prob::$probType, alg::$algType{N}, args...;
+        @generated function SciMLBase.__solve(
+                prob::$probType, alg::$algType{N}, args...; stats = empty_nlstats(),
                 alias_u0 = false, verbose = true, kwargs...) where {N}
             sol_syms = [gensym("sol") for _ in 1:N]
             prob_syms = [gensym("prob") for _ in 1:N]
@@ -280,8 +283,9 @@ for (probType, pType) in ((:NonlinearProblem, :NLS), (:NonlinearLeastSquaresProb
                     else
                         $(prob_syms[i]) = prob
                     end
-                    $(cur_sol) = SciMLBase.__solve($(prob_syms[i]), alg.algs[$(i)],
-                        args...; alias_u0, verbose, kwargs...)
+                    $(cur_sol) = SciMLBase.__solve(
+                        $(prob_syms[i]), alg.algs[$(i)], args...;
+                        stats, alias_u0, verbose, kwargs...)
                     if SciMLBase.successful_retcode($(cur_sol))
                         if alias_u0
                             copyto!(u0, $(cur_sol).u)
diff --git a/src/descent/damped_newton.jl b/src/descent/damped_newton.jl
index d8f25634a..77b204b13 100644
--- a/src/descent/damped_newton.jl
+++ b/src/descent/damped_newton.jl
@@ -51,8 +51,8 @@ end
 
 @internal_caches DampedNewtonDescentCache :lincache :damping_fn_cache
 
-function __internal_init(prob::AbstractNonlinearProblem, alg::DampedNewtonDescent, J,
-        fu, u; pre_inverted::Val{INV} = False, linsolve_kwargs = (;),
+function __internal_init(prob::AbstractNonlinearProblem, alg::DampedNewtonDescent, J, fu,
+        u; stats, pre_inverted::Val{INV} = False, linsolve_kwargs = (;),
         abstol = nothing, timer = get_timer_output(), reltol = nothing,
         alias_J = true, shared::Val{N} = Val(1), kwargs...) where {INV, N}
     length(fu) != length(u) &&
@@ -94,7 +94,7 @@ function __internal_init(prob::AbstractNonlinearProblem, alg::DampedNewtonDescen
             rhs_damp = fu
         end
         damping_fn_cache = __internal_init(prob, alg.damping_fn, alg.initial_damping,
-            jac_damp, rhs_damp, u, False; kwargs...)
+            jac_damp, rhs_damp, u, False; stats, kwargs...)
         D = damping_fn_cache(nothing)
         D isa Number && (D = D * I)
         rhs_cache = vcat(_vec(fu), _vec(u))
@@ -115,7 +115,7 @@ function __internal_init(prob::AbstractNonlinearProblem, alg::DampedNewtonDescen
         jac_damp = requires_normal_form_jacobian(alg.damping_fn) ? JᵀJ : J
         rhs_damp = requires_normal_form_rhs(alg.damping_fn) ? Jᵀfu : fu
         damping_fn_cache = __internal_init(prob, alg.damping_fn, alg.initial_damping,
-            jac_damp, rhs_damp, u, True; kwargs...)
+            jac_damp, rhs_damp, u, True; stats, kwargs...)
         D = damping_fn_cache(nothing)
         @bb J_cache = similar(JᵀJ)
         @bb @. J_cache = 0
@@ -125,7 +125,7 @@ function __internal_init(prob::AbstractNonlinearProblem, alg::DampedNewtonDescen
     end
 
     lincache = LinearSolverCache(
-        alg, alg.linsolve, A, b, _vec(u); abstol, reltol, linsolve_kwargs...)
+        alg, alg.linsolve, A, b, _vec(u); stats, abstol, reltol, linsolve_kwargs...)
 
     return DampedNewtonDescentCache{INV, mode}(
         J_cache, δu, δus, lincache, JᵀJ, Jᵀfu, rhs_cache, damping_fn_cache, timer)
diff --git a/src/descent/newton.jl b/src/descent/newton.jl
index 36edbf86c..2fe7abf9a 100644
--- a/src/descent/newton.jl
+++ b/src/descent/newton.jl
@@ -32,22 +32,22 @@ end
 
 @internal_caches NewtonDescentCache :lincache
 
-function __internal_init(
-        prob::NonlinearProblem, alg::NewtonDescent, J, fu, u; shared::Val{N} = Val(1),
-        pre_inverted::Val{INV} = False, linsolve_kwargs = (;), abstol = nothing,
-        reltol = nothing, timer = get_timer_output(), kwargs...) where {INV, N}
+function __internal_init(prob::NonlinearProblem, alg::NewtonDescent, J, fu, u; stats,
+        shared::Val{N} = Val(1), pre_inverted::Val{INV} = False,
+        linsolve_kwargs = (;), abstol = nothing, reltol = nothing,
+        timer = get_timer_output(), kwargs...) where {INV, N}
     @bb δu = similar(u)
     δus = N ≤ 1 ? nothing : map(2:N) do i
         @bb δu_ = similar(u)
     end
     INV && return NewtonDescentCache{true, false}(δu, δus, nothing, nothing, nothing, timer)
     lincache = LinearSolverCache(
-        alg, alg.linsolve, J, _vec(fu), _vec(u); abstol, reltol, linsolve_kwargs...)
+        alg, alg.linsolve, J, _vec(fu), _vec(u); stats, abstol, reltol, linsolve_kwargs...)
     return NewtonDescentCache{false, false}(δu, δus, lincache, nothing, nothing, timer)
 end
 
-function __internal_init(prob::NonlinearLeastSquaresProblem, alg::NewtonDescent, J,
-        fu, u; pre_inverted::Val{INV} = False, linsolve_kwargs = (;),
+function __internal_init(prob::NonlinearLeastSquaresProblem, alg::NewtonDescent, J, fu,
+        u; stats, pre_inverted::Val{INV} = False, linsolve_kwargs = (;),
         shared::Val{N} = Val(1), abstol = nothing, reltol = nothing,
         timer = get_timer_output(), kwargs...) where {INV, N}
     length(fu) != length(u) &&
@@ -63,7 +63,7 @@ function __internal_init(prob::NonlinearLeastSquaresProblem, alg::NewtonDescent,
         A, b = J, _vec(fu)
     end
     lincache = LinearSolverCache(
-        alg, alg.linsolve, A, b, _vec(u); abstol, reltol, linsolve_kwargs...)
+        alg, alg.linsolve, A, b, _vec(u); stats, abstol, reltol, linsolve_kwargs...)
     @bb δu = similar(u)
     δus = N ≤ 1 ? nothing : map(2:N) do i
         @bb δu_ = similar(u)
diff --git a/src/descent/steepest.jl b/src/descent/steepest.jl
index 227fdb436..cc2f4d128 100644
--- a/src/descent/steepest.jl
+++ b/src/descent/steepest.jl
@@ -30,8 +30,8 @@ end
 @internal_caches SteepestDescentCache :lincache
 
 @inline function __internal_init(
-        prob::AbstractNonlinearProblem, alg::SteepestDescent, J, fu,
-        u; shared::Val{N} = Val(1), pre_inverted::Val{INV} = False,
+        prob::AbstractNonlinearProblem, alg::SteepestDescent, J, fu, u;
+        stats, shared::Val{N} = Val(1), pre_inverted::Val{INV} = False,
         linsolve_kwargs = (;), abstol = nothing, reltol = nothing,
         timer = get_timer_output(), kwargs...) where {INV, N}
     INV && @assert length(fu)==length(u) "Non-Square Jacobian Inverse doesn't make sense."
@@ -40,8 +40,8 @@ end
         @bb δu_ = similar(u)
     end
     if INV
-        lincache = LinearSolverCache(alg, alg.linsolve, transpose(J), _vec(fu),
-            _vec(u); abstol, reltol, linsolve_kwargs...)
+        lincache = LinearSolverCache(alg, alg.linsolve, transpose(J), _vec(fu), _vec(u);
+            stats, abstol, reltol, linsolve_kwargs...)
     else
         lincache = nothing
     end
diff --git a/src/globalization/line_search.jl b/src/globalization/line_search.jl
index 3cd30424f..038cc119a 100644
--- a/src/globalization/line_search.jl
+++ b/src/globalization/line_search.jl
@@ -93,12 +93,12 @@ end
     grad_op
     u_cache
     fu_cache
-    nf::Base.RefValue{Int}
+    stats::NLStats
 end
 
 function __internal_init(
         prob::AbstractNonlinearProblem, alg::LineSearchesJL, f::F, fu, u, p,
-        args...; internalnorm::IN = DEFAULT_NORM, kwargs...) where {F, IN}
+        args...; stats, internalnorm::IN = DEFAULT_NORM, kwargs...) where {F, IN}
     T = promote_type(eltype(fu), eltype(u))
     if u isa Number
         autodiff = get_concrete_forward_ad(alg.autodiff, prob; check_forward_mode = true)
@@ -135,19 +135,18 @@ function __internal_init(
     @bb u_cache = similar(u)
     @bb fu_cache = similar(fu)
 
-    nf = Base.RefValue(0)
     ϕ = @closure (f, p, u, du, α, u_cache, fu_cache) -> begin
         @bb @. u_cache = u + α * du
         fu_cache = evaluate_f!!(f, fu_cache, u_cache, p)
-        nf[] += 1
+        stats.nf += 1
         return @fastmath internalnorm(fu_cache)^2 / 2
     end
 
     dϕ = @closure (f, p, u, du, α, u_cache, fu_cache, grad_op) -> begin
         @bb @. u_cache = u + α * du
         fu_cache = evaluate_f!!(f, fu_cache, u_cache, p)
-        nf[] += 1
+        stats.nf += 1
         g₀ = grad_op(u_cache, fu_cache, p)
         return dot(g₀, du)
     end
 
@@ -155,14 +154,14 @@ function __internal_init(
     ϕdϕ = @closure (f, p, u, du, α, u_cache, fu_cache, grad_op) -> begin
         @bb @. u_cache = u + α * du
         fu_cache = evaluate_f!!(f, fu_cache, u_cache, p)
-        nf[] += 1
+        stats.nf += 1
         g₀ = grad_op(u_cache, fu_cache, p)
         obj = @fastmath internalnorm(fu_cache)^2 / 2
         return obj, dot(g₀, du)
     end
 
-    return LineSearchesJLCache(
-        f, p, ϕ, dϕ, ϕdϕ, alg.method, T(alg.initial_alpha), grad_op, u_cache, fu_cache, nf)
+    return LineSearchesJLCache(f, p, ϕ, dϕ, ϕdϕ, alg.method, T(alg.initial_alpha),
+        grad_op, u_cache, fu_cache, stats)
 end
 
 function __internal_solve!(cache::LineSearchesJLCache, u, du; kwargs...)
@@ -248,21 +247,20 @@ end
     nsteps::Int
     η_strategy
     n_exp::Int
-    nf::Base.RefValue{Int}
+    stats::NLStats
 end
 
 function __internal_init(
-        prob::AbstractNonlinearProblem, alg::RobustNonMonotoneLineSearch, f::F, fu,
-        u, p, args...; internalnorm::IN = DEFAULT_NORM, kwargs...) where {F, IN}
+        prob::AbstractNonlinearProblem, alg::RobustNonMonotoneLineSearch, f::F, fu, u,
+        p, args...; stats, internalnorm::IN = DEFAULT_NORM, kwargs...) where {F, IN}
     @bb u_cache = similar(u)
     @bb fu_cache = similar(fu)
     T = promote_type(eltype(fu), eltype(u))
 
-    nf = Base.RefValue(0)
     ϕ = @closure (f, p, u, du, α, u_cache, fu_cache) -> begin
         @bb @. u_cache = u + α * du
         fu_cache = evaluate_f!!(f, fu_cache, u_cache, p)
-        nf[] += 1
+        stats.nf += 1
         return internalnorm(fu_cache)^alg.n_exp
     end
 
@@ -272,7 +270,7 @@ function __internal_init(
     return RobustNonMonotoneLineSearchCache(
         f, p, ϕ, u_cache, fu_cache, internalnorm, alg.maxiters, fill(fn₁, alg.M),
         T(alg.gamma), T(alg.sigma_1), alg.M,
-        T(alg.tau_min), T(alg.tau_max), 0, η_strategy, alg.n_exp, nf)
+        T(alg.tau_min), T(alg.tau_max), 0, η_strategy, alg.n_exp, stats)
 end
 
 function __internal_solve!(cache::RobustNonMonotoneLineSearchCache, u, du; kwargs...)
@@ -341,28 +339,27 @@ end
     α
     nan_maxiters::Int
     maxiters::Int
-    nf::Base.RefValue{Int}
+    stats::NLStats
 end
 
 function __internal_init(
-        prob::AbstractNonlinearProblem, alg::LiFukushimaLineSearch, f::F, fu, u,
-        p, args...; internalnorm::IN = DEFAULT_NORM, kwargs...) where {F, IN}
+        prob::AbstractNonlinearProblem, alg::LiFukushimaLineSearch, f::F, fu, u, p,
+        args...; stats, internalnorm::IN = DEFAULT_NORM, kwargs...) where {F, IN}
     @bb u_cache = similar(u)
     @bb fu_cache = similar(fu)
     T = promote_type(eltype(fu), eltype(u))
 
-    nf = Base.RefValue(0)
     ϕ = @closure (f, p, u, du, α, u_cache, fu_cache) -> begin
         @bb @. u_cache = u + α * du
         fu_cache = evaluate_f!!(f, fu_cache, u_cache, p)
-        nf[] += 1
+        stats.nf += 1
         return internalnorm(fu_cache)
     end
 
     return LiFukushimaLineSearchCache(
         ϕ, f, p, internalnorm, u_cache, fu_cache, T(alg.lambda_0), T(alg.beta),
         T(alg.sigma_1), T(alg.sigma_2), T(alg.eta),
-        T(alg.rho), T(true), alg.nan_max_iter, alg.maxiters, nf)
+        T(alg.rho), T(true), alg.nan_max_iter, alg.maxiters, stats)
 end
 
 function __internal_solve!(cache::LiFukushimaLineSearchCache, u, du; kwargs...)
diff --git a/src/globalization/trust_region.jl b/src/globalization/trust_region.jl
index 6004695ec..51b54959e 100644
--- a/src/globalization/trust_region.jl
+++ b/src/globalization/trust_region.jl
@@ -43,7 +43,7 @@ end
     last_step_accepted::Bool
     u_cache
     fu_cache
-    nf::Int
+    stats::NLStats
 end
 
 function reinit_cache!(cache::LevenbergMarquardtTrustRegionCache, args...;
@@ -53,18 +53,17 @@ function reinit_cache!(cache::LevenbergMarquardtTrustRegionCache, args...;
     cache.loss_old = oftype(cache.loss_old, Inf)
     cache.norm_v_old = oftype(cache.norm_v_old, Inf)
     cache.last_step_accepted = false
-    cache.nf = 0
 end
 
 function __internal_init(
-        prob::AbstractNonlinearProblem, alg::LevenbergMarquardtTrustRegion, f::F,
-        fu, u, p, args...; internalnorm::IF = DEFAULT_NORM, kwargs...) where {F, IF}
+        prob::AbstractNonlinearProblem, alg::LevenbergMarquardtTrustRegion, f::F, fu,
+        u, p, args...; stats, internalnorm::IF = DEFAULT_NORM, kwargs...) where {F, IF}
     T = promote_type(eltype(u), eltype(fu))
     @bb v = copy(u)
     @bb u_cache = similar(u)
     @bb fu_cache = similar(fu)
-    return LevenbergMarquardtTrustRegionCache(
-        f, p, T(Inf), v, T(Inf), internalnorm, alg.β_uphill, false, u_cache, fu_cache, 0)
+    return LevenbergMarquardtTrustRegionCache(f, p, T(Inf), v, T(Inf), internalnorm,
+        alg.β_uphill, false, u_cache, fu_cache, stats)
 end
 
 function __internal_solve!(
@@ -76,7 +75,7 @@ function __internal_solve!(
 
     @bb @. cache.u_cache = u + δu
     cache.fu_cache = evaluate_f!!(cache.f, cache.fu_cache, cache.u_cache, cache.p)
-    cache.nf += 1
+    cache.stats.nf += 1
 
     loss = cache.internalnorm(cache.fu_cache)
 
@@ -282,7 +281,7 @@ end
     fu_cache
     last_step_accepted::Bool
     shrink_counter::Int
-    nf::Int
+    stats::NLStats
     alg
 end
 
@@ -298,7 +297,6 @@ function reinit_cache!(
     end
     cache.last_step_accepted = false
     cache.shrink_counter = 0
-    cache.nf = 0
 end
 
 # Defaults
@@ -368,8 +366,8 @@ end
 @inline __expand_factor(::Nothing, ::Type{T}, method) where {T} = T(2)
 
 function __internal_init(
-        prob::AbstractNonlinearProblem, alg::GenericTrustRegionScheme, f::F, fu,
-        u, p, args...; internalnorm::IF = DEFAULT_NORM, kwargs...) where {F, IF}
+        prob::AbstractNonlinearProblem, alg::GenericTrustRegionScheme, f::F, fu, u,
+        p, args...; stats, internalnorm::IF = DEFAULT_NORM, kwargs...) where {F, IF}
     T = promote_type(eltype(u), eltype(fu))
     u0_norm = internalnorm(u)
     fu_norm = internalnorm(fu)
@@ -419,7 +417,7 @@ function __internal_init(
         alg.method, f, p, max_trust_radius, initial_trust_radius, initial_trust_radius,
         step_threshold, shrink_threshold, expand_threshold, shrink_factor,
         expand_factor, p1, p2, p3, p4, ϵ, T(0), vjp_operator, jvp_operator, Jᵀfu_cache,
-        Jδu_cache, δu_cache, internalnorm, u_cache, fu_cache, false, 0, 0, alg)
+        Jδu_cache, δu_cache, internalnorm, u_cache, fu_cache, false, 0, stats, alg)
 end
 
 function __internal_solve!(
@@ -427,7 +425,7 @@
     T = promote_type(eltype(u), eltype(fu))
     @bb @. cache.u_cache = u + δu
     cache.fu_cache = evaluate_f!!(cache.f, cache.fu_cache, cache.u_cache, cache.p)
-    cache.nf += 1
+    cache.stats.nf += 1
 
     if hasfield(typeof(descent_stats), :δuJᵀJδu) && !isnan(descent_stats.δuJᵀJδu)
         δuJᵀJδu = descent_stats.δuJᵀJδu
diff --git a/src/internal/approximate_initialization.jl b/src/internal/approximate_initialization.jl
index a1f36f475..4f17cd72f 100644
--- a/src/internal/approximate_initialization.jl
+++ b/src/internal/approximate_initialization.jl
@@ -145,12 +145,12 @@ make a selection automatically.
     autodiff
 end
 
-function __internal_init(
-        prob::AbstractNonlinearProblem, alg::TrueJacobianInitialization, solver, f::F, fu,
-        u, p; linsolve = missing, internalnorm::IN = DEFAULT_NORM, kwargs...) where {F, IN}
+function __internal_init(prob::AbstractNonlinearProblem, alg::TrueJacobianInitialization,
+        solver, f::F, fu, u, p; stats, linsolve = missing,
+        internalnorm::IN = DEFAULT_NORM, kwargs...) where {F, IN}
     autodiff = get_concrete_forward_ad(
         alg.autodiff, prob; check_forward_mode = false, kwargs...)
-    jac_cache = JacobianCache(prob, solver, prob.f, fu, u, p; autodiff, linsolve)
+    jac_cache = JacobianCache(prob, solver, prob.f, fu, u, p; stats, autodiff, linsolve)
     J = alg.structure(jac_cache(nothing))
     return InitializedApproximateJacobianCache(
         J, alg.structure, alg, jac_cache, false, internalnorm)
diff --git a/src/internal/helpers.jl b/src/internal/helpers.jl
index 9a18d0817..c5241481a 100644
--- a/src/internal/helpers.jl
+++ b/src/internal/helpers.jl
@@ -12,7 +12,7 @@ function evaluate_f(prob::AbstractNonlinearProblem{uType, iip}, u) where {uType,
 end
 
 function evaluate_f!(cache, u, p)
-    cache.nf += 1
+    cache.stats.nf += 1
     if isinplace(cache)
         cache.prob.f(get_fu(cache), u, p)
     else
@@ -168,7 +168,8 @@ end
 
 function __construct_extension_jac(prob, alg, u0, fu; can_handle_oop::Val = False,
         can_handle_scalar::Val = False, kwargs...)
-    Jₚ = JacobianCache(prob, alg, prob.f, fu, u0, prob.p; kwargs...)
+    Jₚ = JacobianCache(
+        prob, alg, prob.f, fu, u0, prob.p; stats = empty_nlstats(), kwargs...)
 
     𝓙 = (can_handle_scalar === False && prob.u0 isa Number) ? @closure(u->[Jₚ(u[1])]) : Jₚ
 
@@ -178,21 +179,6 @@ function __construct_extension_jac(prob, alg, u0, fu; can_handle_oop::Val = Fals
     return 𝐉
 end
 
-# Query Statistics
-for stat in (:nsolve, :nfactors, :nsteps, :njacs, :nf)
-    fname = Symbol("get_$(stat)")
-    @eval @inline $(fname)(cache) = __query_stat(cache, $(Val(stat)))
-end
-
-@inline __query_stat(cache, stat::Val) = __direct_query_stat(cache, stat)
-@inline @generated function __direct_query_stat(cache::T, ::Val{stat}) where {T, stat}
-    hasfield(T, stat) || return :(0)
-    return :(__get_data(cache.$(stat)))
-end
-
-@inline __get_data(x::Number) = x
-@inline __get_data(x::Base.RefValue{Int}) = x[]
-
 function reinit_cache! end
 reinit_cache!(cache::Nothing, args...; kwargs...) = nothing
 reinit_cache!(cache, args...; kwargs...) = nothing
@@ -203,12 +189,10 @@ __reinit_internal!(cache, args...; kwargs...) = nothing
 
 # Auto-generate some of the helper functions
 macro internal_caches(cType, internal_cache_names...)
-    return __internal_caches(__source__, __module__, cType, internal_cache_names)
+    return __internal_caches(cType, internal_cache_names)
 end
 
-function __internal_caches(__source__, __module__, cType, internal_cache_names::Tuple)
-    fields = map(
-        name -> :($(__query_stat)(getproperty(cache, $(name)), ST)), internal_cache_names)
+function __internal_caches(cType, internal_cache_names::Tuple)
     callback_caches = map(
         name -> :($(callback_into_cache!)(
             cache, getproperty(internalcache, $(name)), internalcache, args...)),
@@ -221,13 +205,6 @@ function __internal_caches(__source__, __module__, cType, internal_cache_names::
         name -> :($(reinit_cache!)(getproperty(cache, $(name)), args...; kwargs...)),
         internal_cache_names)
     return esc(quote
-        function __query_stat(cache::$(cType), ST::Val{stat}) where {stat}
-            val = $(__direct_query_stat)(cache, ST)
-            return +($(fields...)) + val
-        end
-        function __query_stat(cache::$(cType), ST::Val{:nsteps})
-            return $(__direct_query_stat)(cache, ST)
-        end
         function callback_into_cache!(cache, internalcache::$(cType), args...)
             $(callback_caches...)
         end
diff --git a/src/internal/jacobian.jl b/src/internal/jacobian.jl
index b9cd6d352..2abd89336 100644
--- a/src/internal/jacobian.jl
+++ b/src/internal/jacobian.jl
@@ -36,7 +36,7 @@ Construct a cache for the Jacobian of `f` w.r.t. `u`.
     p
     jac_cache
     alg
-    njacs::Int
+    stats::NLStats
     autodiff
     vjp_autodiff
     jvp_autodiff
@@ -44,15 +44,13 @@ end
 
 function reinit_cache!(cache::JacobianCache{iip}, args...; p = cache.p,
         u0 = cache.u, kwargs...) where {iip}
-    cache.njacs = 0
     cache.u = u0
     cache.p = p
     cache.uf = JacobianWrapper{iip}(cache.f, p)
 end
 
-function JacobianCache(
-        prob, alg, f::F, fu_, u, p; autodiff = nothing, vjp_autodiff = nothing,
-        jvp_autodiff = nothing, linsolve = missing) where {F}
+function JacobianCache(prob, alg, f::F, fu_, u, p; stats, autodiff = nothing,
+        vjp_autodiff = nothing, jvp_autodiff = nothing, linsolve = missing) where {F}
     iip = isinplace(prob)
     uf = JacobianWrapper{iip}(f, p)
 
@@ -94,11 +92,11 @@ function JacobianCache(
     end
 
     return JacobianCache{iip}(
-        J, f, uf, fu, u, p, jac_cache, alg, 0, autodiff, vjp_autodiff, jvp_autodiff)
+        J, f, uf, fu, u, p, jac_cache, alg, stats, autodiff, vjp_autodiff, jvp_autodiff)
 end
 
-function JacobianCache(
-        prob, alg, f::F, ::Number, u::Number, p; autodiff = nothing, kwargs...) where {F}
+function JacobianCache(prob, alg, f::F, ::Number, u::Number, p; stats,
+        autodiff = nothing, kwargs...) where {F}
     uf = JacobianWrapper{false}(f, p)
     autodiff = get_concrete_forward_ad(autodiff, prob; check_forward_mode = false)
     if !(autodiff isa AutoForwardDiff ||
@@ -110,7 +108,7 @@ function JacobianCache(
                       Detected $(autodiff). Falling back to AutoFiniteDiff."
     end
     return JacobianCache{false}(
-        u, f, uf, u, u, p, nothing, alg, 0, autodiff, nothing, nothing)
+        u, f, uf, u, u, p, nothing, alg, stats, autodiff, nothing, nothing)
 end
 
 @inline (cache::JacobianCache)(u = cache.u) = cache(cache.J, u, cache.p)
@@ -124,14 +122,14 @@ function (cache::JacobianCache)(J::JacobianOperator, u, p = cache.p)
     return StatefulJacobianOperator(J, u, p)
 end
 function (cache::JacobianCache)(::Number, u, p = cache.p) # Scalar
-    cache.njacs += 1
+    cache.stats.njacs += 1
     J = last(__value_derivative(cache.autodiff, cache.uf, u))
     return J
 end
 # Compute the Jacobian
 function (cache::JacobianCache{iip})(
         J::Union{AbstractMatrix, Nothing}, u, p = cache.p) where {iip}
-    cache.njacs += 1
+    cache.stats.njacs += 1
     if iip
         if has_jac(cache.f)
             cache.f.jac(J, u, p)
@@ -157,20 +155,14 @@ end
 @inline function __sparsity_detection_alg(f::NonlinearFunction, ad::AutoSparse)
     if f.sparsity === nothing
         if f.jac_prototype === nothing
-            if is_extension_loaded(Val(:Symbolics))
-                return SymbolicsSparsityDetection()
-            else
-                return ApproximateJacobianSparsity()
-            end
+            is_extension_loaded(Val(:Symbolics)) && return SymbolicsSparsityDetection()
+            return ApproximateJacobianSparsity()
         else
             jac_prototype = f.jac_prototype
         end
     elseif f.sparsity isa AbstractSparsityDetection
-        if f.jac_prototype === nothing
-            return f.sparsity
-        else
-            jac_prototype = f.jac_prototype
-        end
+        f.jac_prototype === nothing && return f.sparsity
+        jac_prototype = f.jac_prototype
     elseif f.sparsity isa AbstractMatrix
         jac_prototype = f.sparsity
     elseif f.jac_prototype isa AbstractMatrix
diff --git a/src/internal/linear_solve.jl b/src/internal/linear_solve.jl
index 783bf4956..27e6a780f 100644
--- a/src/internal/linear_solve.jl
+++ b/src/internal/linear_solve.jl
@@ -1,10 +1,8 @@
-import LinearSolve: AbstractFactorization, DefaultAlgorithmChoice, DefaultLinearSolver
-
 const LinearSolveFailureCode = isdefined(ReturnCode, :InternalLinearSolveFailure) ?
                                ReturnCode.InternalLinearSolveFailure : ReturnCode.Failure
 
 """
-    LinearSolverCache(alg, linsolve, A, b, u; kwargs...)
+    LinearSolverCache(alg, linsolve, A, b, u; stats, kwargs...)
 
 Construct a cache for solving linear systems of the form `A * u = b`. Following cases are
 handled:
@@ -52,14 +50,7 @@ not mutated, we do this by copying over `A` to a preconstructed cache.
     A
     b
     precs
-    nsolve::Int
-    nfactors::Int
-end
-
-# FIXME: Do we need to reinit the precs?
-function reinit_cache!(cache::LinearSolverCache, args...; kwargs...)
-    cache.nsolve = 0
-    cache.nfactors = 0
+    stats::NLStats
 end
 
 @inline __fix_strange_type_combination(A, b, u) = u
@@ -77,14 +68,14 @@ end
     cache.lincache.u = u
 end
 
-function LinearSolverCache(alg, linsolve, A, b, u; kwargs...)
+function LinearSolverCache(alg, linsolve, A, b, u; stats, kwargs...)
     u_fixed = __fix_strange_type_combination(A, b, u)
 
     if (A isa Number && b isa Number) ||
       (linsolve === nothing && A isa SMatrix) ||
       (A isa Diagonal) ||
      (linsolve isa typeof(\))
-        return LinearSolverCache(nothing, nothing, nothing, A, b, nothing, 0, 0)
+        return LinearSolverCache(nothing, nothing, nothing, A, b, nothing, stats)
     end
     @bb u_ = copy(u_fixed)
     linprob = LinearProblem(A, b; u0 = u_, kwargs...)
@@ -92,8 +83,7 @@ function LinearSolverCache(alg, linsolve, A, b, u; kwargs...)
     weight = __init_ones(u_fixed)
     if __hasfield(alg, Val(:precs))
         precs = alg.precs
-        Pl_, Pr_ = precs(
-            A, nothing, u, nothing, nothing, nothing, nothing, nothing, nothing)
+        Pl_, Pr_ = precs(A, nothing, u, ntuple(Returns(nothing), 6)...)
     else
         precs, Pl_, Pr_ = nothing, nothing, nothing
     end
@@ -102,7 +92,7 @@ function LinearSolverCache(alg, linsolve, A, b, u; kwargs...)
 
     # Unalias here, we will later use these as caches
     lincache = init(linprob, linsolve; alias_A = false, alias_b = false, Pl, Pr)
-    return LinearSolverCache(lincache, linsolve, nothing, nothing, nothing, precs, 0, 0)
+    return LinearSolverCache(lincache, linsolve, nothing, nothing, nothing, precs, stats)
 end
 
 @kwdef @concrete struct LinearSolveResult
@@ -113,8 +103,8 @@ end
 # Direct Linear Solve Case without Caching
 function (cache::LinearSolverCache{Nothing})(;
         A = nothing, b = nothing, linu = nothing, kwargs...)
-    cache.nsolve += 1
-    cache.nfactors += 1
+    cache.stats.nsolve += 1
+    cache.stats.nfactors += 1
     A === nothing || (cache.A = A)
     b === nothing || (cache.b = b)
     if A isa Diagonal
@@ -132,7 +122,7 @@ function (cache::LinearSolverCache)(;
         A = nothing, b = nothing, linu = nothing,
         du = nothing, p = nothing, weight = nothing, cachedata = nothing,
         reuse_A_if_factorization = false, verbose = true, kwargs...)
-    cache.nsolve += 1
+    cache.stats.nsolve += 1
 
     __update_A!(cache, A, reuse_A_if_factorization)
     b !== nothing && (cache.lincache.b = b)
@@ -224,7 +214,7 @@ end
 @inline function __update_A!(cache, ::AbstractFactorization, A, reuse)
     reuse && return cache
     __set_lincache_A(cache.lincache, A)
-    cache.nfactors += 1
+    cache.stats.nfactors += 1
     return cache
 end
 @inline function __update_A!(cache, alg::DefaultLinearSolver, A, reuse)
@@ -235,7 +225,7 @@ end
     end
     reuse && return cache
     __set_lincache_A(cache.lincache, A)
-    cache.nfactors += 1
+    cache.stats.nfactors += 1
     return cache
 end
 
@@ -253,17 +243,12 @@ function __set_lincache_A(lincache, new_A)
 end
 
 @inline function __wrapprecs(_Pl, _Pr, weight)
-    if _Pl !== nothing
-        Pl = ComposePreconditioner(InvPreconditioner(Diagonal(_vec(weight))), _Pl)
-    else
-        Pl = InvPreconditioner(Diagonal(_vec(weight)))
-    end
+    Pl = _Pl !== nothing ?
+         ComposePreconditioner(InvPreconditioner(Diagonal(_vec(weight))), _Pl) :
+         InvPreconditioner(Diagonal(_vec(weight)))
 
-    if _Pr !== nothing
-        Pr = ComposePreconditioner(Diagonal(_vec(weight)), _Pr)
-    else
-        Pr = Diagonal(_vec(weight))
-    end
+    Pr = _Pr !== nothing ? ComposePreconditioner(Diagonal(_vec(weight)), _Pr) :
+         Diagonal(_vec(weight))
 
     return Pl, Pr
 end
diff --git a/src/internal/operators.jl b/src/internal/operators.jl
index 74667dbdb..8c3c8922c 100644
--- a/src/internal/operators.jl
+++ b/src/internal/operators.jl
@@ -34,13 +34,9 @@ end
 
 Base.size(J::JacobianOperator) = prod(size(J.output_cache)), prod(size(J.input_cache))
 function Base.size(J::JacobianOperator, d::Integer)
-    if d == 1
-        return prod(size(J.output_cache))
-    elseif d == 2
-        return prod(size(J.input_cache))
-    else
-        error("Invalid dimension $d for JacobianOperator")
-    end
+    d == 1 && return prod(size(J.output_cache))
+    d == 2 && return prod(size(J.input_cache))
+    error("Invalid dimension $d for JacobianOperator")
 end
 
 for op in (:adjoint, :transpose)
diff --git a/src/internal/tracing.jl b/src/internal/tracing.jl
index 75fcaa66a..fe35f1a9c 100644
--- a/src/internal/tracing.jl
+++ b/src/internal/tracing.jl
@@ -207,12 +207,11 @@ function update_trace!(cache::AbstractNonlinearSolveCache, α = true)
     J = __getproperty(cache, Val(:J))
     if J === nothing
         update_trace!(
-            trace, get_nsteps(cache) + 1, get_u(cache), get_fu(cache), nothing, cache.du, α)
+            trace, cache.nsteps + 1, get_u(cache), get_fu(cache), nothing, cache.du, α)
     elseif cache isa ApproximateJacobianSolveCache && store_inverse_jacobian(cache)
-        update_trace!(trace, get_nsteps(cache) + 1, get_u(cache),
-            get_fu(cache), ApplyArray(__safe_inv, J), cache.du, α)
+        update_trace!(trace, cache.nsteps + 1, get_u(cache), get_fu(cache),
+            ApplyArray(__safe_inv, J), cache.du, α)
     else
-        update_trace!(
-            trace, get_nsteps(cache) + 1, get_u(cache), get_fu(cache), J, cache.du, α)
+        update_trace!(trace, cache.nsteps + 1, get_u(cache), get_fu(cache), J, cache.du, α)
     end
 end
diff --git a/src/utils.jl b/src/utils.jl
index beda82460..d13555259 100644
--- a/src/utils.jl
+++ b/src/utils.jl
@@ -129,42 +129,6 @@ end
 
 @inline __dot(x, y) = dot(_vec(x), _vec(y))
 
-# Return an ImmutableNLStats object when we know that NLStats won't be updated
-"""
-    ImmutableNLStats(nf, njacs, nfactors, nsolve, nsteps)
-
-Statistics from the nonlinear equation solver about the solution process.
-
-## Fields
-
-  - nf: Number of function evaluations.
-  - njacs: Number of Jacobians created during the solve.
-  - nfactors: Number of factorzations of the jacobian required for the solve.
-  - nsolve: Number of linear solves `W \\ b` required for the solve.
-  - nsteps: Total number of iterations for the nonlinear solver.
-"""
-struct ImmutableNLStats
-    nf::Int
-    njacs::Int
-    nfactors::Int
-    nsolve::Int
-    nsteps::Int
-end
-
-function Base.show(io::IO, ::MIME"text/plain", s::ImmutableNLStats)
-    println(io, summary(s))
-    @printf io "%-50s %-d\n" "Number of function evaluations:" s.nf
-    @printf io "%-50s %-d\n" "Number of Jacobians created:" s.njacs
-    @printf io "%-50s %-d\n" "Number of factorizations:" s.nfactors
-    @printf io "%-50s %-d\n" "Number of linear solves:" s.nsolve
-    @printf io "%-50s %-d" "Number of nonlinear solver iterations:" s.nsteps
-end
-
-function Base.merge(s1::ImmutableNLStats, s2::ImmutableNLStats)
-    return ImmutableNLStats(s1.nf + s2.nf, s1.njacs + s2.njacs, s1.nfactors + s2.nfactors,
-        s1.nsolve + s2.nsolve, s1.nsteps + s2.nsteps)
-end
-
 """
     pickchunksize(x) = pickchunksize(length(x))
     pickchunksize(x::Int)
@@ -182,7 +146,17 @@ function __build_solution_less_specialize(prob::AbstractNonlinearProblem, alg, u
     T = eltype(eltype(u))
     N = ndims(u)
 
-    return SciMLBase.NonlinearSolution{T, N, typeof(u), typeof(resid), typeof(prob),
-        typeof(alg), Any, typeof(left), Any, typeof(trace)}(
+    return SciMLBase.NonlinearSolution{
+        T, N, typeof(u), typeof(resid), typeof(prob), typeof(alg),
+        Any, typeof(left), typeof(stats), typeof(trace)}(
         u, resid, prob, alg, retcode, original, left, right, stats, trace)
 end
+
+@inline empty_nlstats() = NLStats(0, 0, 0, 0, 0)
+function __reinit_internal!(stats::NLStats)
+    stats.nf = 0
+    stats.nsteps = 0
+    stats.nfactors = 0
+    stats.njacs = 0
+    stats.nsolve = 0