From 28893bdad109de35989fdd949c87745eb0d64b4d Mon Sep 17 00:00:00 2001
From: Christopher Rackauckas
Date: Mon, 29 May 2023 12:23:55 -0700
Subject: [PATCH 01/19] Rework default algorithm to be fully type stable

There are two possible ideas. One is to put all algorithms and caches in
the default algorithm into a unityper. The other is to make a "mega
algorithm" that carries runtime information on the choice. This takes the
latter approach because that keeps most of the package code intact and
makes it so that any explicit algorithm choice by the user will not incur
any runtime dispatch. This uses an enum inside of the algorithm struct in
order to choose the actual solver for the given process. In `init` and
`solve`, static dispatching is done through hardcoded branches.

Todo:

- [ ] Finish the implementation to make it actually work (check the
      generated functions)
- [ ] Make the OperatorAssumptions be dynamic information
- [ ] Test and time to see the real overhead
- [ ] Munge the output into a solution struct that doesn't have the
      actual algorithm so that the output is type stable

Things to consider: the one thing that may be a blocker is the `init`
cost. While I don't think it will be an issue, we will need to see if
constructing all of the empty arrays simply to hold a thing of the right
type is too costly. Basically, what we may need is for `Float64[]` to be
optimized to a no-op zero allocation, in which case essentially the whole
cache structure should be free to build. As it stands, this may be a
cause of overhead, as it needs to build all of the potential caches even
if it only ever uses one.
---
 src/default.jl | 206 +++++++++++++++++++++++++++++++------------------
 1 file changed, 133 insertions(+), 73 deletions(-)

diff --git a/src/default.jl b/src/default.jl
index 8a0bffde2..118c32256 100644
--- a/src/default.jl
+++ b/src/default.jl
@@ -1,123 +1,144 @@
+struct DefaultLinearSolver <: SciMLLinearSolveAlgorithm
+    alg::DefaultAlgorithmChoice
+end
+
+EnumX.@enumx DefaultAlgorithmChoice begin
+    LUFactorization
+    QRFactorization
+    DiagonalFactorization
+    DirectLdiv!
+    SparspakFactorization
+    KLUFactorization
+    UMFPACKFactorization
+    KrylovJL_GMRES
+    GenericLUFactorization
+    RowMaximumGenericLUFactorization
+    RFLUFactorization
+    PivotedRFLUFactorization
+    LDLtFactorization
+    SVDFactorization
+end
+
 # Legacy fallback
 # For SciML algorithms already using `defaultalg`, all assume square matrix.
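# As a rough sketch (simplified stand-in names, not this package's actual
# types), the pattern the commit message describes looks like the following:
# the enum value is runtime information, but because every hardcoded branch
# returns the same concrete type, the whole call remains type stable.

using EnumX, LinearAlgebra

EnumX.@enumx SketchChoice begin
    LU
    QR
end

struct SketchDefault
    choice::SketchChoice.T   # runtime value held inside one concrete struct
end

function sketch_solve(alg::SketchDefault, A::Matrix{Float64}, b::Vector{Float64})
    # Both arms return a Vector{Float64}, so inference sees a single return
    # type even though the branch is picked at runtime.
    if alg.choice === SketchChoice.LU
        lu(A) \ b
    else
        qr(A) \ b
    end
end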
defaultalg(A, b) = defaultalg(A, b, OperatorAssumptions(Val(true))) function defaultalg(A::Union{DiffEqArrayOperator, MatrixOperator}, b, assumptions::OperatorAssumptions) - defaultalg(A.A, b, assumptions) + DefaultLinearSolver(defaultalg(A.A, b, assumptions)) end # Ambiguity handling function defaultalg(A::Union{DiffEqArrayOperator, MatrixOperator}, b, assumptions::OperatorAssumptions{nothing}) - defaultalg(A.A, b, assumptions) + DefaultLinearSolver(defaultalg(A.A, b, assumptions)) end function defaultalg(A::Union{DiffEqArrayOperator, MatrixOperator}, b, assumptions::OperatorAssumptions{false}) - defaultalg(A.A, b, assumptions) + DefaultLinearSolver(defaultalg(A.A, b, assumptions)) end function defaultalg(A::Union{DiffEqArrayOperator, MatrixOperator}, b, assumptions::OperatorAssumptions{true}) - defaultalg(A.A, b, assumptions) + DefaultLinearSolver(defaultalg(A.A, b, assumptions)) end function defaultalg(A, b, ::OperatorAssumptions{Nothing}) issq = issquare(A) - defaultalg(A, b, OperatorAssumptions(Val(issq))) + DefaultLinearSolver(defaultalg(A, b, OperatorAssumptions(Val(issq)))) end function defaultalg(A::Tridiagonal, b, ::OperatorAssumptions{true}) - GenericFactorization(; fact_alg = lu!) + DefaultAlgorithmChoice.LUFactorization end function defaultalg(A::Tridiagonal, b, ::OperatorAssumptions{false}) - GenericFactorization(; fact_alg = qr!) + DefaultAlgorithmChoice.QRFactorization end function defaultalg(A::SymTridiagonal, b, ::OperatorAssumptions{true}) - GenericFactorization(; fact_alg = ldlt!) + DefaultAlgorithmChoice.LDLtFactorization end function defaultalg(A::Bidiagonal, b, ::OperatorAssumptions{true}) - DirectLdiv!() + DefaultAlgorithmChoice.DirectLdiv! end function defaultalg(A::Factorization, b, ::OperatorAssumptions{true}) - DirectLdiv!() + DefaultAlgorithmChoice.DirectLdiv! 
end function defaultalg(A::Diagonal, b, ::OperatorAssumptions{true}) - DiagonalFactorization() + DefaultAlgorithmChoice.DiagonalFactorization end function defaultalg(A::Diagonal, b, ::OperatorAssumptions{Nothing}) - DiagonalFactorization() + DefaultAlgorithmChoice.DiagonalFactorization end function defaultalg(A::AbstractSparseMatrixCSC{Tv, Ti}, b, ::OperatorAssumptions{true}) where {Tv, Ti} - SparspakFactorization() + DefaultAlgorithmChoice.SparspakFactorization end @static if INCLUDE_SPARSE function defaultalg(A::AbstractSparseMatrixCSC{<:Union{Float64, ComplexF64}, Ti}, b, ::OperatorAssumptions{true}) where {Ti} if length(b) <= 10_000 - KLUFactorization() + DefaultAlgorithmChoice.KLUFactorization else - UMFPACKFactorization() + DefaultAlgorithmChoice.UMFPACKFactorization end end end function defaultalg(A::GPUArraysCore.AbstractGPUArray, b, assump::OperatorAssumptions{true}) if VERSION >= v"1.8-" - LUFactorization() + DefaultAlgorithmChoice.LUFactorization else - QRFactorization() + DefaultAlgorithmChoice.QRFactorization end end function defaultalg(A::GPUArraysCore.AbstractGPUArray, b, assump::OperatorAssumptions{true, OperatorCondition.IllConditioned}) - QRFactorization() + DefaultAlgorithmChoice.QRFactorization end function defaultalg(A, b::GPUArraysCore.AbstractGPUArray, assump::OperatorAssumptions{true}) if VERSION >= v"1.8-" - LUFactorization() + DefaultAlgorithmChoice.LUFactorization else - QRFactorization() + DefaultAlgorithmChoice.QRFactorization end end function defaultalg(A, b::GPUArraysCore.AbstractGPUArray, assump::OperatorAssumptions{true, OperatorCondition.IllConditioned}) - QRFactorization() + DefaultAlgorithmChoice.QRFactorization end function defaultalg(A::SciMLBase.AbstractSciMLOperator, b, assumptions::OperatorAssumptions{true}) if has_ldiv!(A) - return DirectLdiv!() + return DefaultAlgorithmChoice.DirectLdiv! end - KrylovJL_GMRES() + DefaultAlgorithmChoice.KrylovJL_GMRES end # Ambiguity handling function defaultalg(A::SciMLBase.AbstractSciMLOperator, b, assumptions::OperatorAssumptions{Nothing}) if has_ldiv!(A) - return DirectLdiv!() + return DefaultAlgorithmChoice.DirectLdiv! 
end - KrylovJL_GMRES() + DefaultAlgorithmChoice.KrylovJL_GMRES end function defaultalg(A::SciMLBase.AbstractSciMLOperator, b, assumptions::OperatorAssumptions{false}) m, n = size(A) if m < n - KrylovJL_CRAIGMR() + DefaultAlgorithmChoice.KrylovJL_CRAIGMR else - KrylovJL_LSMR() + DefaultAlgorithmChoice.KrylovJL_LSMR end end @@ -125,29 +146,29 @@ end function defaultalg(A::GPUArraysCore.AbstractGPUArray, b::GPUArraysCore.AbstractGPUArray, ::OperatorAssumptions{true}) if VERSION >= v"1.8-" - LUFactorization() + DefaultAlgorithmChoice.LUFactorization else - QRFactorization() + DefaultAlgorithmChoice.QRFactorization end end function defaultalg(A::GPUArraysCore.AbstractGPUArray, b::GPUArraysCore.AbstractGPUArray, ::OperatorAssumptions{true, OperatorCondition.IllConditioned}) - QRFactorization() + DefaultAlgorithmChoice.QRFactorization end function defaultalg(A::GPUArraysCore.AbstractGPUArray, b, ::OperatorAssumptions{false}) - QRFactorization() + DefaultAlgorithmChoice.QRFactorization end function defaultalg(A, b::GPUArraysCore.AbstractGPUArray, ::OperatorAssumptions{false}) - QRFactorization() + DefaultAlgorithmChoice.QRFactorization end # Handle ambiguity function defaultalg(A::GPUArraysCore.AbstractGPUArray, b::GPUArraysCore.AbstractGPUArray, ::OperatorAssumptions{false}) - QRFactorization() + DefaultAlgorithmChoice.QRFactorization end # Allows A === nothing as a stand-in for dense matrix @@ -161,83 +182,60 @@ function defaultalg(A, b, assump::OperatorAssumptions{true}) (__conditioning(assump) === OperatorCondition.IllConditioned || __conditioning(assump) === OperatorCondition.WellConditioned) if length(b) <= 10 - pivot = @static if VERSION < v"1.7beta" - if __conditioning(assump) === OperatorCondition.IllConditioned - Val(true) - else - Val(false) - end + if __conditioning(assump) === OperatorCondition.IllConditioned + alg = DefaultAlgorithmChoice.RowMaximumGenericLUFactorization else - if __conditioning(assump) === OperatorCondition.IllConditioned - RowMaximum() - else - RowNonZero() - end - end - alg = GenericLUFactorization(pivot) + alg = DefaultAlgorithmChoice.GenericLUFactorization + end elseif (length(b) <= 100 || (isopenblas() && length(b) <= 500)) && (A === nothing ? 
eltype(b) <: Union{Float32, Float64} : eltype(A) <: Union{Float32, Float64}) - pivot = if __conditioning(assump) === OperatorCondition.IllConditioned - Val(true) - else - Val(false) - end - alg = RFLUFactorization(; pivot = pivot) + DefaultAlgorithmChoice.RFLUFactorization #elseif A === nothing || A isa Matrix # alg = FastLUFactorization() else - pivot = @static if VERSION < v"1.7beta" - if __conditioning(assump) === OperatorCondition.IllConditioned - Val(true) - else - Val(false) - end + if __conditioning(assump) === OperatorCondition.IllConditioned + alg = DefaultAlgorithmChoice.RowMaximumGenericLUFactorization else - if __conditioning(assump) === OperatorCondition.IllConditioned - RowMaximum() - else - RowNonZero() - end + alg = DefaultAlgorithmChoice.GenericLUFactorization end - alg = LUFactorization(pivot) end elseif __conditioning(assump) === OperatorCondition.VeryIllConditioned - alg = QRFactorization() + alg = DefaultAlgorithmChoice.QRFactorization elseif __conditioning(assump) === OperatorCondition.SuperIllConditioned - alg = SVDFactorization(false, LinearAlgebra.QRIteration()) + alg = DefaultAlgorithmChoice.SVDFactorization else - alg = LUFactorization() + alg = DefaultAlgorithmChoice.LUFactorization end # This catches the cases where a factorization overload could exist # For example, BlockBandedMatrix elseif A !== nothing && ArrayInterface.isstructured(A) - alg = GenericFactorization() + alg = DefaultAlgorithmChoice.GenericFactorization # Not factorizable operator, default to only using A*x else - alg = KrylovJL_GMRES() + alg = DefaultAlgorithmChoice.KrylovJL_GMRES end alg end function defaultalg(A, b, ::OperatorAssumptions{false, OperatorCondition.WellConditioned}) - NormalCholeskyFactorization() + DefaultAlgorithmChoice.NormalCholeskyFactorization end function defaultalg(A, b, ::OperatorAssumptions{false, OperatorCondition.IllConditioned}) - QRFactorization() + DefaultAlgorithmChoice.QRFactorization end function defaultalg(A, b, ::OperatorAssumptions{false, OperatorCondition.VeryIllConditioned}) - QRFactorization() + DefaultAlgorithmChoice.QRFactorization end function defaultalg(A, b, ::OperatorAssumptions{false, OperatorCondition.SuperIllConditioned}) - SVDFactorization(false, LinearAlgebra.QRIteration()) + DefaultAlgorithmChoice.SVDFactorization end ## Catch high level interface @@ -263,3 +261,65 @@ function init_cacheval(alg::Nothing, A, b, u, Pl, Pr, maxiters::Int, abstol, rel verbose, assumptions) end + +""" +cache.cacheval = NamedTuple(LUFactorization = cache of LUFactorization, ...) +""" +@generated function init_cacheval(alg::DefaultLinearSolver, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) + caches = [$alg = :(init_cacheval($alg(), A, b, u, Pl, Pr, maxiters, abstol, reltol, + verbose, + assumptions)) for alg in first.(EnumX.symbol_map(DefaultAlgorithmChoice.T))] + quote + return NamedTuple($caches...) + end +end + +""" +if cache.alg.alg === DefaultAlgorithmChoice.LUFactorization + cache.cacheval[LUFactorization] +else + ... +end +""" +@generated function get_cacheval(cache::LinearCache) + ex = :() + for alg in first.(EnumX.symbol_map(DefaultAlgorithmChoice.T)) + ex = if expr === :() + Expr(:elseif, :(cache.alg.alg === $alg),:(cache.cacheval[$alg])) + else + Expr(:elseif, :(cache.alg.alg === $alg),:(cache.cacheval[$alg]),ex) + end + end + ex = Expr(:if,ex.args...) 
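# (Assuming the loop above is fixed to use `ex` rather than the undefined
# `expr`, the intended expansion is roughly:
#
#     if cache.alg.alg === DefaultAlgorithmChoice.LUFactorization
#         cache.cacheval[:LUFactorization]
#     elseif cache.alg.alg === DefaultAlgorithmChoice.QRFactorization
#         cache.cacheval[:QRFactorization]
#     elseif ...
#     end
#
# i.e. one enum comparison per algorithm, so the lookup compiles to a
# static branch chain instead of a dynamic dispatch.)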
+ + quote + if cache.alg === DefaultLinearSolver + $ex + else + cache.cacheval + end + end +end + +""" +if alg.alg === DefaultAlgorithmChoice.LUFactorization + SciMLBase.solve!(cache, LUFactorization(), args...; kwargs...)) +else + ... +end +""" +function SciMLBase.solve!(cache::LinearCache, alg::DefaultLinearSolver, + args...; assumptions::OperatorAssumptions = OperatorAssumptions(), + kwargs...) + ex = :() + for alg in first.(EnumX.symbol_map(DefaultAlgorithmChoice.T)) + ex = if expr === :() + Expr(:elseif, :(alg.alg === $alg),:(SciMLBase.solve!(cache, $alg(), args...; kwargs...))) + else + Expr(:elseif, :(alg.alg === $alg),:(SciMLBase.solve!(cache, $alg(), args...; kwargs...)),ex) + end + end + ex = Expr(:if,ex.args...) + +end \ No newline at end of file From c73eedd4335bb89560c51c8c82b3fc17a4c75180 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Tue, 30 May 2023 06:05:41 -0700 Subject: [PATCH 02/19] actually works now --- src/LinearSolve.jl | 2 + src/common.jl | 3 + src/default.jl | 164 +++++++++++++++++++++++++++++-------------- src/factorization.jl | 85 ++++++++++++++-------- 4 files changed, 170 insertions(+), 84 deletions(-) diff --git a/src/LinearSolve.jl b/src/LinearSolve.jl index 89fd28701..2be83aba1 100644 --- a/src/LinearSolve.jl +++ b/src/LinearSolve.jl @@ -77,6 +77,7 @@ isopenblas() = IS_OPENBLAS[] import PrecompileTools +#= PrecompileTools.@compile_workload begin A = rand(4, 4) b = rand(4) @@ -104,6 +105,7 @@ PrecompileTools.@compile_workload begin sol = solve(prob) # in case sparspak is used as default sol = solve(prob, SparspakFactorization()) end +=# export LUFactorization, SVDFactorization, QRFactorization, GenericFactorization, GenericLUFactorization, SimpleLUFactorization, RFLUFactorization, diff --git a/src/common.jl b/src/common.jl index 988db2755..5ac36bb3d 100644 --- a/src/common.jl +++ b/src/common.jl @@ -83,6 +83,9 @@ end function Base.setproperty!(cache::LinearCache, name::Symbol, x) if name === :A setfield!(cache, :isfresh, true) + elseif name === :cacheval && cache.alg isa DefaultLinearSolver + @assert cache.cacheval isa DefaultLinearSolverInit + return setfield!(cache.cacheval, Symbol(cache.alg.alg), x) end setfield!(cache, name, x) end diff --git a/src/default.jl b/src/default.jl index 118c32256..e7b145b81 100644 --- a/src/default.jl +++ b/src/default.jl @@ -1,7 +1,3 @@ -struct DefaultLinearSolver <: SciMLLinearSolveAlgorithm - alg::DefaultAlgorithmChoice -end - EnumX.@enumx DefaultAlgorithmChoice begin LUFactorization QRFactorization @@ -14,11 +10,30 @@ EnumX.@enumx DefaultAlgorithmChoice begin GenericLUFactorization RowMaximumGenericLUFactorization RFLUFactorization - PivotedRFLUFactorization LDLtFactorization SVDFactorization end +struct DefaultLinearSolver <: SciMLLinearSolveAlgorithm + alg::DefaultAlgorithmChoice.T +end + +mutable struct DefaultLinearSolverInit{T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13} + LUFactorization::T1 + QRFactorization::T2 + DiagonalFactorization::T3 + DirectLdiv!::T4 + SparspakFactorization::T5 + KLUFactorization::T6 + UMFPACKFactorization::T7 + KrylovJL_GMRES::T8 + GenericLUFactorization::T9 + RowMaximumGenericLUFactorization::T10 + RFLUFactorization::T11 + LDLtFactorization::T12 + SVDFactorization::T13 +end + # Legacy fallback # For SciML algorithms already using `defaultalg`, all assume square matrix. 
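# A small runnable sketch of the caching scheme above (illustrative names,
# assuming a simplified two-slot cache; the real DefaultLinearSolverInit has
# one concretely typed slot per DefaultAlgorithmChoice): writes to
# `cache.cacheval` are rerouted into the slot named by the current choice,
# mirroring the `Base.setproperty!` hook added in src/common.jl.

mutable struct SketchInit{T1, T2}
    lu_slot::T1
    qr_slot::T2
end

mutable struct SketchCache{C}
    alg::Symbol        # stands in for the enum choice, e.g. :lu_slot
    cacheval::C        # a SketchInit holding every per-algorithm cache
    isfresh::Bool
end

function Base.setproperty!(cache::SketchCache, name::Symbol, x)
    if name === :cacheval && getfield(cache, :cacheval) isa SketchInit
        # store into the slot for the currently selected algorithm
        return setfield!(getfield(cache, :cacheval), getfield(cache, :alg), x)
    end
    setfield!(cache, name, x)
end

# usage: the write lands in `lu_slot`, leaving the other slot untouched
cache = SketchCache(:lu_slot, SketchInit(nothing, nothing), true)
cache.cacheval = nothing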
defaultalg(A, b) = defaultalg(A, b, OperatorAssumptions(Val(true))) @@ -50,95 +65,95 @@ function defaultalg(A, b, ::OperatorAssumptions{Nothing}) end function defaultalg(A::Tridiagonal, b, ::OperatorAssumptions{true}) - DefaultAlgorithmChoice.LUFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.LUFactorization) end function defaultalg(A::Tridiagonal, b, ::OperatorAssumptions{false}) - DefaultAlgorithmChoice.QRFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) end function defaultalg(A::SymTridiagonal, b, ::OperatorAssumptions{true}) - DefaultAlgorithmChoice.LDLtFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.LDLtFactorization) end function defaultalg(A::Bidiagonal, b, ::OperatorAssumptions{true}) - DefaultAlgorithmChoice.DirectLdiv! + DefaultLinearSolver(DefaultAlgorithmChoice.DirectLdiv!) end function defaultalg(A::Factorization, b, ::OperatorAssumptions{true}) - DefaultAlgorithmChoice.DirectLdiv! + DefaultLinearSolver(DefaultAlgorithmChoice.DirectLdiv!) end function defaultalg(A::Diagonal, b, ::OperatorAssumptions{true}) - DefaultAlgorithmChoice.DiagonalFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.DiagonalFactorization) end function defaultalg(A::Diagonal, b, ::OperatorAssumptions{Nothing}) - DefaultAlgorithmChoice.DiagonalFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.DiagonalFactorization) end function defaultalg(A::AbstractSparseMatrixCSC{Tv, Ti}, b, ::OperatorAssumptions{true}) where {Tv, Ti} - DefaultAlgorithmChoice.SparspakFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.SparspakFactorization) end @static if INCLUDE_SPARSE function defaultalg(A::AbstractSparseMatrixCSC{<:Union{Float64, ComplexF64}, Ti}, b, ::OperatorAssumptions{true}) where {Ti} if length(b) <= 10_000 - DefaultAlgorithmChoice.KLUFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.KLUFactorization) else - DefaultAlgorithmChoice.UMFPACKFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.UMFPACKFactorization) end end end function defaultalg(A::GPUArraysCore.AbstractGPUArray, b, assump::OperatorAssumptions{true}) if VERSION >= v"1.8-" - DefaultAlgorithmChoice.LUFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.LUFactorization) else - DefaultAlgorithmChoice.QRFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) end end function defaultalg(A::GPUArraysCore.AbstractGPUArray, b, assump::OperatorAssumptions{true, OperatorCondition.IllConditioned}) - DefaultAlgorithmChoice.QRFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) end function defaultalg(A, b::GPUArraysCore.AbstractGPUArray, assump::OperatorAssumptions{true}) if VERSION >= v"1.8-" - DefaultAlgorithmChoice.LUFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.LUFactorization) else - DefaultAlgorithmChoice.QRFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) end end function defaultalg(A, b::GPUArraysCore.AbstractGPUArray, assump::OperatorAssumptions{true, OperatorCondition.IllConditioned}) - DefaultAlgorithmChoice.QRFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) end function defaultalg(A::SciMLBase.AbstractSciMLOperator, b, assumptions::OperatorAssumptions{true}) if has_ldiv!(A) - return DefaultAlgorithmChoice.DirectLdiv! + return DefaultLinearSolver(DefaultAlgorithmChoice.DirectLdiv!) 
end - DefaultAlgorithmChoice.KrylovJL_GMRES + DefaultLinearSolver( DefaultAlgorithmChoice.KrylovJL_GMRES) end # Ambiguity handling function defaultalg(A::SciMLBase.AbstractSciMLOperator, b, assumptions::OperatorAssumptions{Nothing}) if has_ldiv!(A) - return DefaultAlgorithmChoice.DirectLdiv! + return DefaultLinearSolver(DefaultAlgorithmChoice.DirectLdiv!) end - DefaultAlgorithmChoice.KrylovJL_GMRES + DefaultLinearSolver(DefaultAlgorithmChoice.KrylovJL_GMRES) end function defaultalg(A::SciMLBase.AbstractSciMLOperator, b, assumptions::OperatorAssumptions{false}) m, n = size(A) if m < n - DefaultAlgorithmChoice.KrylovJL_CRAIGMR + DefaultLinearSolver(DefaultAlgorithmChoice.KrylovJL_CRAIGMR) else - DefaultAlgorithmChoice.KrylovJL_LSMR + DefaultLinearSolver(DefaultAlgorithmChoice.KrylovJL_LSMR) end end @@ -146,29 +161,29 @@ end function defaultalg(A::GPUArraysCore.AbstractGPUArray, b::GPUArraysCore.AbstractGPUArray, ::OperatorAssumptions{true}) if VERSION >= v"1.8-" - DefaultAlgorithmChoice.LUFactorization + DefaultLinearSolver( DefaultAlgorithmChoice.LUFactorization) else - DefaultAlgorithmChoice.QRFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) end end function defaultalg(A::GPUArraysCore.AbstractGPUArray, b::GPUArraysCore.AbstractGPUArray, ::OperatorAssumptions{true, OperatorCondition.IllConditioned}) - DefaultAlgorithmChoice.QRFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) end function defaultalg(A::GPUArraysCore.AbstractGPUArray, b, ::OperatorAssumptions{false}) - DefaultAlgorithmChoice.QRFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) end function defaultalg(A, b::GPUArraysCore.AbstractGPUArray, ::OperatorAssumptions{false}) - DefaultAlgorithmChoice.QRFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) end # Handle ambiguity function defaultalg(A::GPUArraysCore.AbstractGPUArray, b::GPUArraysCore.AbstractGPUArray, ::OperatorAssumptions{false}) - DefaultAlgorithmChoice.QRFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) end # Allows A === nothing as a stand-in for dense matrix @@ -217,25 +232,58 @@ function defaultalg(A, b, assump::OperatorAssumptions{true}) else alg = DefaultAlgorithmChoice.KrylovJL_GMRES end - alg + DefaultLinearSolver(alg) end function defaultalg(A, b, ::OperatorAssumptions{false, OperatorCondition.WellConditioned}) - DefaultAlgorithmChoice.NormalCholeskyFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.NormalCholeskyFactorization) end function defaultalg(A, b, ::OperatorAssumptions{false, OperatorCondition.IllConditioned}) - DefaultAlgorithmChoice.QRFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) end function defaultalg(A, b, ::OperatorAssumptions{false, OperatorCondition.VeryIllConditioned}) - DefaultAlgorithmChoice.QRFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) end function defaultalg(A, b, ::OperatorAssumptions{false, OperatorCondition.SuperIllConditioned}) - DefaultAlgorithmChoice.SVDFactorization + DefaultLinearSolver(DefaultAlgorithmChoice.SVDFactorization) +end + +function algchoice_to_alg(alg::Symbol) + if alg === :SVDFactorization + SVDFactorization(false, LinearAlgebra.QRIteration()) + elseif alg === :RowMaximumGenericLUFactorization + GenericLUFactorization(RowMaximum()) + elseif alg === :LDLtFactorization + GenericFactorization() + #GenericFactorization(; fact_alg = ldlt!) 
+ elseif alg === :LUFactorization + LUFactorization() + elseif alg === :QRFactorization + QRFactorization() + elseif alg === :DiagonalFactorization + DiagonalFactorization() + elseif alg === :DirectLdiv! + DirectLdiv!() + elseif alg === :SparspakFactorization + SparspakFactorization() + elseif alg === :KLUFactorization + KLUFactorization() + elseif alg === :UMFPACKFactorization + UMFPACKFactorization() + elseif alg === :KrylovJL_GMRES + KrylovJL_GMRES() + elseif alg === :GenericLUFactorization + GenericLUFactorization() + elseif alg === :RFLUFactorization + RFLUFactorization() + else + error("Algorithm choice symbol $alg not allowed in the default") + end end ## Catch high level interface @@ -267,34 +315,33 @@ cache.cacheval = NamedTuple(LUFactorization = cache of LUFactorization, ...) """ @generated function init_cacheval(alg::DefaultLinearSolver, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) - caches = [$alg = :(init_cacheval($alg(), A, b, u, Pl, Pr, maxiters, abstol, reltol, + caches = [:(init_cacheval($(algchoice_to_alg(alg)), A, b, u, Pl, Pr, maxiters, abstol, reltol, verbose, assumptions)) for alg in first.(EnumX.symbol_map(DefaultAlgorithmChoice.T))] - quote - return NamedTuple($caches...) - end + Expr(:call,:DefaultLinearSolverInit, caches...) end """ -if cache.alg.alg === DefaultAlgorithmChoice.LUFactorization - cache.cacheval[LUFactorization] +if algsym === :LUFactorization + cache.cacheval.LUFactorization = ... else ... end """ -@generated function get_cacheval(cache::LinearCache) +@generated function get_cacheval(cache::LinearCache, algsym::Symbol) ex = :() for alg in first.(EnumX.symbol_map(DefaultAlgorithmChoice.T)) - ex = if expr === :() - Expr(:elseif, :(cache.alg.alg === $alg),:(cache.cacheval[$alg])) + ex = if ex === :() + Expr(:elseif, :(algsym === $(Meta.quot(alg))),:(getfield(cache.cacheval,$(Meta.quot(alg))))) else - Expr(:elseif, :(cache.alg.alg === $alg),:(cache.cacheval[$alg]),ex) + Expr(:elseif, :(algsym === $(Meta.quot(alg))),:(getfield(cache.cacheval, $(Meta.quot(alg)))),ex) end end ex = Expr(:if,ex.args...) quote - if cache.alg === DefaultLinearSolver + @show algsym + if cache.alg isa DefaultLinearSolver $ex else cache.cacheval @@ -302,6 +349,10 @@ end end end +defaultalg_symbol(::Type{T}) where T = Symbol(SciMLBase.parameterless_type(T)) +defaultalg_symbol(::Type{<:GenericLUFactorization{LinearAlgebra.RowMaximum}}) = :RowMaximumGenericLUFactorization +defaultalg_symbol(::Type{<:GenericFactorization{typeof(ldlt!)}}) = :LDLtFactorization + """ if alg.alg === DefaultAlgorithmChoice.LUFactorization SciMLBase.solve!(cache, LUFactorization(), args...; kwargs...)) @@ -309,17 +360,22 @@ else ... end """ -function SciMLBase.solve!(cache::LinearCache, alg::DefaultLinearSolver, +@generated function SciMLBase.solve!(cache::LinearCache, alg::DefaultLinearSolver, args...; assumptions::OperatorAssumptions = OperatorAssumptions(), kwargs...) ex = :() for alg in first.(EnumX.symbol_map(DefaultAlgorithmChoice.T)) - ex = if expr === :() - Expr(:elseif, :(alg.alg === $alg),:(SciMLBase.solve!(cache, $alg(), args...; kwargs...))) + newex = quote + sol = SciMLBase.solve!(cache, $(algchoice_to_alg(alg)), args...; kwargs...) 
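# (Rewrapping with the outer `alg` here is what keeps `solve!` type stable:
# every branch of the generated if-elseif chain returns a solution built
# around the same DefaultLinearSolver, so the concrete per-branch algorithm
# type never leaks into the return type.)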
+ SciMLBase.build_linear_solution(alg, sol.u, sol.resid, sol.cache; + retcode = sol.retcode, + iters = sol.iters, stats = sol.stats) + end + ex = if ex === :() + Expr(:elseif, :(Symbol(alg.alg) === $(Meta.quot(alg))), newex) else - Expr(:elseif, :(alg.alg === $alg),:(SciMLBase.solve!(cache, $alg(), args...; kwargs...)),ex) + Expr(:elseif, :(Symbol(alg.alg) === $(Meta.quot(alg))), newex,ex) end end ex = Expr(:if,ex.args...) - end \ No newline at end of file diff --git a/src/factorization.jl b/src/factorization.jl index aef21ddbb..7b9c140d3 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -6,14 +6,16 @@ function _ldiv!(x::Vector, A::Factorization, b::Vector) ldiv!(A, x) end -function SciMLBase.solve!(cache::LinearCache, alg::AbstractFactorization; kwargs...) - if cache.isfresh - fact = do_factorization(alg, cache.A, cache.b, cache.u) - cache.cacheval = fact - cache.isfresh = false +@generated function SciMLBase.solve!(cache::LinearCache, alg::AbstractFactorization; kwargs...) + quote + if cache.isfresh + fact = do_factorization(alg, cache.A, cache.b, cache.u) + cache.cacheval = fact + cache.isfresh = false + end + y = _ldiv!(cache.u, get_cacheval(cache, $(Meta.quot(defaultalg_symbol(alg)))), cache.b) + SciMLBase.build_linear_solution(alg, y, nothing, cache) end - y = _ldiv!(cache.u, cache.cacheval, cache.b) - SciMLBase.build_linear_solution(alg, y, nothing, cache) end #RF Bad fallback: will fail if `A` is just a stand-in @@ -342,18 +344,32 @@ function init_cacheval(alg::UMFPACKFactorization, A, b, u, Pl, Pr, maxiters::Int reltol, verbose::Bool, assumptions::OperatorAssumptions) A = convert(AbstractMatrix, A) - zerobased = SparseArrays.getcolptr(A)[1] == 0 - @static if VERSION < v"1.9.0-DEV.1622" - return SuiteSparse.UMFPACK.UmfpackLU(C_NULL, C_NULL, size(A, 1), size(A, 2), - zerobased ? copy(SparseArrays.getcolptr(A)) : - SuiteSparse.decrement(SparseArrays.getcolptr(A)), - zerobased ? copy(rowvals(A)) : - SuiteSparse.decrement(rowvals(A)), - copy(nonzeros(A)), 0) - finalizer(SuiteSparse.UMFPACK.umfpack_free_symbolic, res) + + if typeof(A) <: SparseArrays.AbstractSparseArray + zerobased = SparseArrays.getcolptr(A)[1] == 0 + @static if VERSION < v"1.9.0-DEV.1622" + res = SuiteSparse.UMFPACK.UmfpackLU(C_NULL, C_NULL, size(A, 1), size(A, 2), + zerobased ? copy(SparseArrays.getcolptr(A)) : + SuiteSparse.decrement(SparseArrays.getcolptr(A)), + zerobased ? copy(rowvals(A)) : + SuiteSparse.decrement(rowvals(A)), + copy(nonzeros(A)), 0) + finalizer(SuiteSparse.UMFPACK.umfpack_free_symbolic, res) + return res + else + return SuiteSparse.UMFPACK.UmfpackLU(SparseMatrixCSC(size(A)..., getcolptr(A), + rowvals(A), nonzeros(A))) + end + else - return SuiteSparse.UMFPACK.UmfpackLU(SparseMatrixCSC(size(A)..., getcolptr(A), - rowvals(A), nonzeros(A))) + @static if VERSION < v"1.9.0-DEV.1622" + res = SuiteSparse.UMFPACK.UmfpackLU(C_NULL, C_NULL, 0, 0, + [0], Int64[], eltype(A)[], 0) + finalizer(SuiteSparse.UMFPACK.umfpack_free_symbolic, res) + return res + else + return SuiteSparse.UMFPACK.UmfpackLU(SparseMatrixCSC(0,0, [1], Int64[], eltype(A)[])) + end end end @@ -369,7 +385,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::UMFPACKFactorization; kwargs. fact = lu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A))) else - fact = lu!(cache.cacheval, + fact = lu!(get_cacheval(cache, :UMFPACKFactorization), SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A))) end @@ -380,7 +396,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::UMFPACKFactorization; kwargs. 
cache.isfresh = false end - y = ldiv!(cache.u, cache.cacheval, cache.b) + y = ldiv!(cache.u, get_cacheval(cache, :UMFPACKFactorization), cache.b) SciMLBase.build_linear_solution(alg, y, nothing, cache) end @@ -393,8 +409,12 @@ function init_cacheval(alg::KLUFactorization, A, b, u, Pl, Pr, maxiters::Int, ab reltol, verbose::Bool, assumptions::OperatorAssumptions) A = convert(AbstractMatrix, A) - return KLU.KLUFactorization(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), - nonzeros(A))) + if typeof(A) <: SparseArrays.AbstractSparseArray + return KLU.KLUFactorization(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), + nonzeros(A))) + else + return KLU.KLUFactorization(SparseMatrixCSC(0,0, [1], Int64[], eltype(A)[])) + end end function SciMLBase.solve!(cache::LinearCache, alg::KLUFactorization; kwargs...) @@ -415,7 +435,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::KLUFactorization; kwargs...) if cache.cacheval._numeric === C_NULL # We MUST have a numeric factorization for reuse, unlike UMFPACK. KLU.klu_factor!(cache.cacheval) end - fact = KLU.klu!(cache.cacheval, + fact = KLU.klu!(get_cacheval(cache, :KLUFactorization), SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A))) end @@ -429,7 +449,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::KLUFactorization; kwargs...) cache.isfresh = false end - y = ldiv!(cache.u, cache.cacheval, cache.b) + y = ldiv!(cache.u, get_cacheval(cache, :KLUFactorization), cache.b) SciMLBase.build_linear_solution(alg, y, nothing, cache) end @@ -506,7 +526,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::NormalCholeskyFactorization; cache.u .= cache.cacheval \ (A' * cache.b) y = cache.u else - y = ldiv!(cache.u, cache.cacheval, A' * cache.b) + y = ldiv!(cache.u, get_cacheval(cache, :NormalCholeskyFactorization), A' * cache.b) end SciMLBase.build_linear_solution(alg, y, nothing, cache) end @@ -539,7 +559,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::NormalBunchKaufmanFactorizati cache.cacheval = fact cache.isfresh = false end - y = ldiv!(cache.u, cache.cacheval, A' * cache.b) + y = ldiv!(cache.u, get_cacheval(cache, :NormalBunchKaufmanFactorization), A' * cache.b) SciMLBase.build_linear_solution(alg, y, nothing, cache) end @@ -685,15 +705,20 @@ function init_cacheval(::SparspakFactorization, A, b, u, Pl, Pr, maxiters::Int, reltol, verbose::Bool, assumptions::OperatorAssumptions) A = convert(AbstractMatrix, A) - return sparspaklu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A)), - factorize = false) + if typeof(A) <: SparseArrays.AbstractSparseArray + return sparspaklu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A)), + factorize = false) + else + return sparspaklu(SparseMatrixCSC(0,0, [1], Int64[], eltype(A)[]), + factorize = false) + end end function SciMLBase.solve!(cache::LinearCache, alg::SparspakFactorization; kwargs...) 
A = cache.A if cache.isfresh if cache.cacheval !== nothing && alg.reuse_symbolic - fact = sparspaklu!(cache.cacheval, + fact = sparspaklu!(get_cacheval(cache, :SparspakFactorization), SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A))) else @@ -703,6 +728,6 @@ function SciMLBase.solve!(cache::LinearCache, alg::SparspakFactorization; kwargs cache.cacheval = fact cache.isfresh = false end - y = ldiv!(cache.u, cache.cacheval, cache.b) + y = ldiv!(cache.u, get_cacheval(cache, :SparspakFactorization), cache.b) SciMLBase.build_linear_solution(alg, y, nothing, cache) end From 6d00eb249c97fc825935620afd0d54d8fcc6c61a Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Tue, 30 May 2023 06:52:36 -0700 Subject: [PATCH 03/19] Inference works --- src/common.jl | 21 ++-- src/default.jl | 314 +++++++++++++++++++++---------------------------- 2 files changed, 144 insertions(+), 191 deletions(-) diff --git a/src/common.jl b/src/common.jl index 5ac36bb3d..b8d4d1ce6 100644 --- a/src/common.jl +++ b/src/common.jl @@ -53,17 +53,19 @@ end Sets the operator `A` assumptions used as part of the default algorithm """ -struct OperatorAssumptions{issq, condition} end +struct OperatorAssumptions{T} + issq::T + condition::OperatorCondition.T +end + function OperatorAssumptions(issquare = nothing; condition::OperatorCondition.T = OperatorCondition.IllConditioned) - issq = something(_unwrap_val(issquare), Nothing) - condition = _unwrap_val(condition) - OperatorAssumptions{issq, condition}() + OperatorAssumptions{typeof(issquare)}(issquare, condition) end -__issquare(::OperatorAssumptions{issq, condition}) where {issq, condition} = issq -__conditioning(::OperatorAssumptions{issq, condition}) where {issq, condition} = condition +__issquare(assump::OperatorAssumptions) = assump.issq +__conditioning(assump::OperatorAssumptions) = assump.condition -mutable struct LinearCache{TA, Tb, Tu, Tp, Talg, Tc, Tl, Tr, Ttol, issq, condition} +mutable struct LinearCache{TA, Tb, Tu, Tp, Talg, Tc, Tl, Tr, Ttol, issq} A::TA b::Tb u::Tu @@ -77,7 +79,7 @@ mutable struct LinearCache{TA, Tb, Tu, Tp, Talg, Tc, Tl, Tr, Ttol, issq, conditi reltol::Ttol maxiters::Int verbose::Bool - assumptions::OperatorAssumptions{issq, condition} + assumptions::OperatorAssumptions{issq} end function Base.setproperty!(cache::LinearCache, name::Symbol, x) @@ -150,8 +152,7 @@ function SciMLBase.init(prob::LinearProblem, alg::SciMLLinearSolveAlgorithm, typeof(Pl), typeof(Pr), typeof(reltol), - __issquare(assumptions), - __conditioning(assumptions) + typeof(assumptions.issq), }(A, b, u0, diff --git a/src/default.jl b/src/default.jl index e7b145b81..ebee645f0 100644 --- a/src/default.jl +++ b/src/default.jl @@ -18,7 +18,7 @@ struct DefaultLinearSolver <: SciMLLinearSolveAlgorithm alg::DefaultAlgorithmChoice.T end -mutable struct DefaultLinearSolverInit{T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13} +mutable struct DefaultLinearSolverInit{T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11} LUFactorization::T1 QRFactorization::T2 DiagonalFactorization::T3 @@ -27,232 +27,185 @@ mutable struct DefaultLinearSolverInit{T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T1 KLUFactorization::T6 UMFPACKFactorization::T7 KrylovJL_GMRES::T8 - GenericLUFactorization::T9 - RowMaximumGenericLUFactorization::T10 - RFLUFactorization::T11 - LDLtFactorization::T12 - SVDFactorization::T13 + GenericLUFactorization::Any + RowMaximumGenericLUFactorization::T9 + RFLUFactorization::T10 + LDLtFactorization::Any + SVDFactorization::T11 end # Legacy fallback # For SciML algorithms already using 
`defaultalg`, all assume square matrix. -defaultalg(A, b) = defaultalg(A, b, OperatorAssumptions(Val(true))) +defaultalg(A, b) = defaultalg(A, b, OperatorAssumptions(true)) function defaultalg(A::Union{DiffEqArrayOperator, MatrixOperator}, b, - assumptions::OperatorAssumptions) - DefaultLinearSolver(defaultalg(A.A, b, assumptions)) + assump::OperatorAssumptions) + DefaultLinearSolver(defaultalg(A.A, b, assump)) end -# Ambiguity handling -function defaultalg(A::Union{DiffEqArrayOperator, MatrixOperator}, b, - assumptions::OperatorAssumptions{nothing}) - DefaultLinearSolver(defaultalg(A.A, b, assumptions)) -end - -function defaultalg(A::Union{DiffEqArrayOperator, MatrixOperator}, b, - assumptions::OperatorAssumptions{false}) - DefaultLinearSolver(defaultalg(A.A, b, assumptions)) -end - -function defaultalg(A::Union{DiffEqArrayOperator, MatrixOperator}, b, - assumptions::OperatorAssumptions{true}) - DefaultLinearSolver(defaultalg(A.A, b, assumptions)) -end - -function defaultalg(A, b, ::OperatorAssumptions{Nothing}) +function defaultalg(A, b, assump::OperatorAssumptions{Nothing}) issq = issquare(A) - DefaultLinearSolver(defaultalg(A, b, OperatorAssumptions(Val(issq)))) + DefaultLinearSolver(defaultalg(A, b, OperatorAssumptions(issq, assump.condition))) end -function defaultalg(A::Tridiagonal, b, ::OperatorAssumptions{true}) - DefaultLinearSolver(DefaultAlgorithmChoice.LUFactorization) -end -function defaultalg(A::Tridiagonal, b, ::OperatorAssumptions{false}) - DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) +function defaultalg(A::Tridiagonal, b, assump::OperatorAssumptions{true}) + if assump.issq + DefaultLinearSolver(DefaultAlgorithmChoice.LUFactorization) + else + DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) + end end -function defaultalg(A::SymTridiagonal, b, ::OperatorAssumptions{true}) + +function defaultalg(A::SymTridiagonal, b, ::OperatorAssumptions) DefaultLinearSolver(DefaultAlgorithmChoice.LDLtFactorization) end -function defaultalg(A::Bidiagonal, b, ::OperatorAssumptions{true}) +function defaultalg(A::Bidiagonal, b, ::OperatorAssumptions) DefaultLinearSolver(DefaultAlgorithmChoice.DirectLdiv!) end -function defaultalg(A::Factorization, b, ::OperatorAssumptions{true}) +function defaultalg(A::Factorization, b, ::OperatorAssumptions) DefaultLinearSolver(DefaultAlgorithmChoice.DirectLdiv!) 
)
 end

-function defaultalg(A::Diagonal, b, ::OperatorAssumptions{true})
-    DefaultLinearSolver(DefaultAlgorithmChoice.DiagonalFactorization)
-end
-function defaultalg(A::Diagonal, b, ::OperatorAssumptions{Nothing})
+function defaultalg(A::Diagonal, b, ::OperatorAssumptions)
     DefaultLinearSolver(DefaultAlgorithmChoice.DiagonalFactorization)
 end

 function defaultalg(A::AbstractSparseMatrixCSC{Tv, Ti}, b,
-                    ::OperatorAssumptions{true}) where {Tv, Ti}
-    DefaultLinearSolver(DefaultAlgorithmChoice.SparspakFactorization)
+                    assump::OperatorAssumptions{true}) where {Tv, Ti}
+    if assump.issq
+        DefaultLinearSolver(DefaultAlgorithmChoice.SparspakFactorization)
+    else
+        error("Generic number sparse factorization for non-square is not currently handled")
+    end
 end

 @static if INCLUDE_SPARSE
     function defaultalg(A::AbstractSparseMatrixCSC{<:Union{Float64, ComplexF64}, Ti}, b,
-                        ::OperatorAssumptions{true}) where {Ti}
-        if length(b) <= 10_000
-            DefaultLinearSolver(DefaultAlgorithmChoice.KLUFactorization)
+                        assump::OperatorAssumptions{true}) where {Ti}
+        if assump.issq
+            if length(b) <= 10_000
+                DefaultLinearSolver(DefaultAlgorithmChoice.KLUFactorization)
+            else
+                DefaultLinearSolver(DefaultAlgorithmChoice.UMFPACKFactorization)
+            end
         else
-            DefaultLinearSolver(DefaultAlgorithmChoice.UMFPACKFactorization)
+            error("Non-square sparse factorization case not handled")
         end
     end
 end

-function defaultalg(A::GPUArraysCore.AbstractGPUArray, b, assump::OperatorAssumptions{true})
-    if VERSION >= v"1.8-"
-        DefaultLinearSolver(DefaultAlgorithmChoice.LUFactorization)
-    else
+function defaultalg(A::GPUArraysCore.AbstractGPUArray, b, assump::OperatorAssumptions)
+    if assump.condition === OperatorCondition.IllConditioned || !assump.issq
         DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization)
-    end
-end
-
-function defaultalg(A::GPUArraysCore.AbstractGPUArray, b,
-                    assump::OperatorAssumptions{true, OperatorCondition.IllConditioned})
-    DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization)
-end
-
-function defaultalg(A, b::GPUArraysCore.AbstractGPUArray, assump::OperatorAssumptions{true})
-    if VERSION >= v"1.8-"
-        DefaultLinearSolver(DefaultAlgorithmChoice.LUFactorization)
     else
-        DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization)
-    end
+        @static if VERSION >= v"1.8-"
+            DefaultLinearSolver(DefaultAlgorithmChoice.LUFactorization)
+        else
+            DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization)
+        end
+    end
 end

-function defaultalg(A, b::GPUArraysCore.AbstractGPUArray,
-                    assump::OperatorAssumptions{true, OperatorCondition.IllConditioned})
-    DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization)
+# A === nothing case
+function defaultalg(A, b::GPUArraysCore.AbstractGPUArray, assump::OperatorAssumptions)
+    if assump.condition === OperatorCondition.IllConditioned || !assump.issq
+        DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization)
+    else
+        @static if VERSION >= v"1.8-"
+            DefaultLinearSolver(DefaultAlgorithmChoice.LUFactorization)
+        else
+            DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization)
+        end
+    end
 end

-function defaultalg(A::SciMLBase.AbstractSciMLOperator, b,
-                    assumptions::OperatorAssumptions{true})
-    if has_ldiv!(A)
-        return DefaultLinearSolver(DefaultAlgorithmChoice.DirectLdiv!)
-    end
-
-    DefaultLinearSolver( DefaultAlgorithmChoice.KrylovJL_GMRES)
+# Ambiguity handling
+function defaultalg(A::GPUArraysCore.AbstractGPUArray, b::GPUArraysCore.AbstractGPUArray, assump::OperatorAssumptions)
+    if assump.condition === OperatorCondition.IllConditioned || !assump.issq
+        DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization)
+    else
+        @static if VERSION >= v"1.8-"
+            DefaultLinearSolver(DefaultAlgorithmChoice.LUFactorization)
+        else
+            DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization)
+        end
+    end
 end

-# Ambiguity handling
 function defaultalg(A::SciMLBase.AbstractSciMLOperator, b,
-                    assumptions::OperatorAssumptions{Nothing})
+                    assump::OperatorAssumptions)
     if has_ldiv!(A)
         return DefaultLinearSolver(DefaultAlgorithmChoice.DirectLdiv!)
-    end
-
-    DefaultLinearSolver(DefaultAlgorithmChoice.KrylovJL_GMRES)
-end
-
-function defaultalg(A::SciMLBase.AbstractSciMLOperator, b,
-                    assumptions::OperatorAssumptions{false})
-    m, n = size(A)
-    if m < n
-        DefaultLinearSolver(DefaultAlgorithmChoice.KrylovJL_CRAIGMR)
-    else
-        DefaultLinearSolver(DefaultAlgorithmChoice.KrylovJL_LSMR)
-    end
-end
-
-# Handle ambiguity
-function defaultalg(A::GPUArraysCore.AbstractGPUArray, b::GPUArraysCore.AbstractGPUArray,
-                    ::OperatorAssumptions{true})
-    if VERSION >= v"1.8-"
-        DefaultLinearSolver( DefaultAlgorithmChoice.LUFactorization)
+    elseif !assump.issq
+        m, n = size(A)
+        if m < n
+            DefaultLinearSolver(DefaultAlgorithmChoice.KrylovJL_CRAIGMR)
+        else
+            DefaultLinearSolver(DefaultAlgorithmChoice.KrylovJL_LSMR)
+        end
     else
-        DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization)
+        DefaultLinearSolver( DefaultAlgorithmChoice.KrylovJL_GMRES)
     end
 end

-function defaultalg(A::GPUArraysCore.AbstractGPUArray, b::GPUArraysCore.AbstractGPUArray,
-                    ::OperatorAssumptions{true, OperatorCondition.IllConditioned})
-    DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization)
-end
-
-function defaultalg(A::GPUArraysCore.AbstractGPUArray, b, ::OperatorAssumptions{false})
-    DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization)
-end
-
-function defaultalg(A, b::GPUArraysCore.AbstractGPUArray, ::OperatorAssumptions{false})
-    DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization)
-end
-
-# Handle ambiguity
-function defaultalg(A::GPUArraysCore.AbstractGPUArray, b::GPUArraysCore.AbstractGPUArray,
-                    ::OperatorAssumptions{false})
-    DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization)
-end
-
 # Allows A === nothing as a stand-in for dense matrix
-function defaultalg(A, b, assump::OperatorAssumptions{true})
-    # Special case on Arrays: avoid BLAS for RecursiveFactorization.jl when
-    # it makes sense according to the benchmarks, which is dependent on
-    # whether MKL or OpenBLAS is being used
-    if (A === nothing && !(b isa GPUArraysCore.AbstractGPUArray)) || A isa Matrix
-        if (A === nothing || eltype(A) <: Union{Float32, Float64, ComplexF32, ComplexF64}) &&
-           ArrayInterface.can_setindex(b) &&
-           (__conditioning(assump) === OperatorCondition.IllConditioned ||
-            __conditioning(assump) === OperatorCondition.WellConditioned)
-            if length(b) <= 10
-                if __conditioning(assump) === OperatorCondition.IllConditioned
-                    alg = DefaultAlgorithmChoice.RowMaximumGenericLUFactorization
+function defaultalg(A, b, assump::OperatorAssumptions)
+    if assump.issq
+        # Special case on Arrays: avoid BLAS for RecursiveFactorization.jl when
+        # it makes sense according to the benchmarks, which is dependent on
+        # whether MKL or OpenBLAS is being used
+        alg = if (A === nothing && !(b isa GPUArraysCore.AbstractGPUArray)) || A isa
Matrix + if (A === nothing || eltype(A) <: Union{Float32, Float64, ComplexF32, ComplexF64}) && + ArrayInterface.can_setindex(b) && + (__conditioning(assump) === OperatorCondition.IllConditioned || + __conditioning(assump) === OperatorCondition.WellConditioned) + if length(b) <= 10 + if __conditioning(assump) === OperatorCondition.IllConditioned + DefaultAlgorithmChoice.RowMaximumGenericLUFactorization + else + DefaultAlgorithmChoice.GenericLUFactorization + end + elseif (length(b) <= 100 || (isopenblas() && length(b) <= 500)) && + (A === nothing ? eltype(b) <: Union{Float32, Float64} : + eltype(A) <: Union{Float32, Float64}) + DefaultAlgorithmChoice.RFLUFactorization + #elseif A === nothing || A isa Matrix + # alg = FastLUFactorization() else - alg = DefaultAlgorithmChoice.GenericLUFactorization - end - elseif (length(b) <= 100 || (isopenblas() && length(b) <= 500)) && - (A === nothing ? eltype(b) <: Union{Float32, Float64} : - eltype(A) <: Union{Float32, Float64}) - DefaultAlgorithmChoice.RFLUFactorization - #elseif A === nothing || A isa Matrix - # alg = FastLUFactorization() - else - if __conditioning(assump) === OperatorCondition.IllConditioned - alg = DefaultAlgorithmChoice.RowMaximumGenericLUFactorization - else - alg = DefaultAlgorithmChoice.GenericLUFactorization + if __conditioning(assump) === OperatorCondition.IllConditioned + DefaultAlgorithmChoice.RowMaximumGenericLUFactorization + else + DefaultAlgorithmChoice.GenericLUFactorization + end end + elseif __conditioning(assump) === OperatorCondition.VeryIllConditioned + DefaultAlgorithmChoice.QRFactorization + elseif __conditioning(assump) === OperatorCondition.SuperIllConditioned + DefaultAlgorithmChoice.SVDFactorization + else + DefaultAlgorithmChoice.LUFactorization end - elseif __conditioning(assump) === OperatorCondition.VeryIllConditioned - alg = DefaultAlgorithmChoice.QRFactorization - elseif __conditioning(assump) === OperatorCondition.SuperIllConditioned - alg = DefaultAlgorithmChoice.SVDFactorization - else - alg = DefaultAlgorithmChoice.LUFactorization - end - # This catches the cases where a factorization overload could exist - # For example, BlockBandedMatrix - elseif A !== nothing && ArrayInterface.isstructured(A) - alg = DefaultAlgorithmChoice.GenericFactorization + # This catches the cases where a factorization overload could exist + # For example, BlockBandedMatrix + elseif A !== nothing && ArrayInterface.isstructured(A) + DefaultAlgorithmChoice.GenericFactorization - # Not factorizable operator, default to only using A*x - else - alg = DefaultAlgorithmChoice.KrylovJL_GMRES + # Not factorizable operator, default to only using A*x + else + DefaultAlgorithmChoice.KrylovJL_GMRES + end + elseif assump.condition === OperatorCondition.WellConditioned + DefaultAlgorithmChoice.NormalCholeskyFactorization + elseif assump.condition === OperatorCondition.IllConditioned + DefaultAlgorithmChoice.QRFactorization + elseif assump.condition === OperatorCondition.VeryIllConditioned + DefaultAlgorithmChoice.QRFactorization + elseif assump.condition === OperatorCondition.SuperIllConditioned + DefaultAlgorithmChoice.SVDFactorization end DefaultLinearSolver(alg) end -function defaultalg(A, b, ::OperatorAssumptions{false, OperatorCondition.WellConditioned}) - DefaultLinearSolver(DefaultAlgorithmChoice.NormalCholeskyFactorization) -end - -function defaultalg(A, b, ::OperatorAssumptions{false, OperatorCondition.IllConditioned}) - DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) -end - -function defaultalg(A, b, - 
::OperatorAssumptions{false, OperatorCondition.VeryIllConditioned}) - DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) -end - -function defaultalg(A, b, - ::OperatorAssumptions{false, OperatorCondition.SuperIllConditioned}) - DefaultLinearSolver(DefaultAlgorithmChoice.SVDFactorization) -end - function algchoice_to_alg(alg::Symbol) if alg === :SVDFactorization SVDFactorization(false, LinearAlgebra.QRIteration()) @@ -290,34 +243,34 @@ end function SciMLBase.init(prob::LinearProblem, alg::Nothing, args...; - assumptions = OperatorAssumptions(Val(issquare(prob.A))), + assumptions = OperatorAssumptions(issquare(prob.A)), kwargs...) alg = defaultalg(prob.A, prob.b, assumptions) SciMLBase.init(prob, alg, args...; assumptions, kwargs...) end function SciMLBase.solve!(cache::LinearCache, alg::Nothing, - args...; assumptions::OperatorAssumptions = OperatorAssumptions(), + args...; assump::OperatorAssumptions = OperatorAssumptions(), kwargs...) @unpack A, b = cache - SciMLBase.solve!(cache, defaultalg(A, b, assumptions), args...; kwargs...) + SciMLBase.solve!(cache, defaultalg(A, b, assump), args...; kwargs...) end function init_cacheval(alg::Nothing, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) - init_cacheval(defaultalg(A, b, assumptions), A, b, u, Pl, Pr, maxiters, abstol, reltol, + verbose::Bool, assump::OperatorAssumptions) + init_cacheval(defaultalg(A, b, assump), A, b, u, Pl, Pr, maxiters, abstol, reltol, verbose, - assumptions) + assump) end """ cache.cacheval = NamedTuple(LUFactorization = cache of LUFactorization, ...) """ @generated function init_cacheval(alg::DefaultLinearSolver, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::Bool, assump::OperatorAssumptions) caches = [:(init_cacheval($(algchoice_to_alg(alg)), A, b, u, Pl, Pr, maxiters, abstol, reltol, verbose, - assumptions)) for alg in first.(EnumX.symbol_map(DefaultAlgorithmChoice.T))] + assump)) for alg in first.(EnumX.symbol_map(DefaultAlgorithmChoice.T))] Expr(:call,:DefaultLinearSolverInit, caches...) end @@ -340,7 +293,6 @@ end ex = Expr(:if,ex.args...) quote - @show algsym if cache.alg isa DefaultLinearSolver $ex else @@ -361,7 +313,7 @@ else end """ @generated function SciMLBase.solve!(cache::LinearCache, alg::DefaultLinearSolver, - args...; assumptions::OperatorAssumptions = OperatorAssumptions(), + args...; assump::OperatorAssumptions = OperatorAssumptions(), kwargs...) 
ex = :() for alg in first.(EnumX.symbol_map(DefaultAlgorithmChoice.T)) From d6dfa16ba6c55983ab2b72bc8751ae5dad5ec97a Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Tue, 30 May 2023 07:13:22 -0700 Subject: [PATCH 04/19] setup LDLt factorization to make all caches concrete --- src/default.jl | 23 +++++++++++------------ src/factorization.jl | 39 +++++++++++++++++++++++++++++++++------ 2 files changed, 44 insertions(+), 18 deletions(-) diff --git a/src/default.jl b/src/default.jl index ebee645f0..47e8cab6e 100644 --- a/src/default.jl +++ b/src/default.jl @@ -18,7 +18,7 @@ struct DefaultLinearSolver <: SciMLLinearSolveAlgorithm alg::DefaultAlgorithmChoice.T end -mutable struct DefaultLinearSolverInit{T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11} +mutable struct DefaultLinearSolverInit{T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13} LUFactorization::T1 QRFactorization::T2 DiagonalFactorization::T3 @@ -27,11 +27,11 @@ mutable struct DefaultLinearSolverInit{T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11} KLUFactorization::T6 UMFPACKFactorization::T7 KrylovJL_GMRES::T8 - GenericLUFactorization::Any - RowMaximumGenericLUFactorization::T9 - RFLUFactorization::T10 - LDLtFactorization::Any - SVDFactorization::T11 + GenericLUFactorization::T9 + RowMaximumGenericLUFactorization::T10 + RFLUFactorization::T11 + LDLtFactorization::T12 + SVDFactorization::T13 end # Legacy fallback @@ -212,8 +212,7 @@ function algchoice_to_alg(alg::Symbol) elseif alg === :RowMaximumGenericLUFactorization GenericLUFactorization(RowMaximum()) elseif alg === :LDLtFactorization - GenericFactorization() - #GenericFactorization(; fact_alg = ldlt!) + LDLtFactorization() elseif alg === :LUFactorization LUFactorization() elseif alg === :QRFactorization @@ -284,7 +283,7 @@ end @generated function get_cacheval(cache::LinearCache, algsym::Symbol) ex = :() for alg in first.(EnumX.symbol_map(DefaultAlgorithmChoice.T)) - ex = if ex === :() + ex = if ex == :() Expr(:elseif, :(algsym === $(Meta.quot(alg))),:(getfield(cache.cacheval,$(Meta.quot(alg))))) else Expr(:elseif, :(algsym === $(Meta.quot(alg))),:(getfield(cache.cacheval, $(Meta.quot(alg)))),ex) @@ -323,11 +322,11 @@ end retcode = sol.retcode, iters = sol.iters, stats = sol.stats) end - ex = if ex === :() - Expr(:elseif, :(Symbol(alg.alg) === $(Meta.quot(alg))), newex) + ex = if ex == :() + Expr(:elseif, :(Symbol(alg.alg) === $(Meta.quot(alg))), newex, :(error("Algorithm Choice not Allowed"))) else Expr(:elseif, :(Symbol(alg.alg) === $(Meta.quot(alg))), newex,ex) end end - ex = Expr(:if,ex.args...) + ex = Expr(:if,ex.args...) 
end \ No newline at end of file diff --git a/src/factorization.jl b/src/factorization.jl index 7b9c140d3..49c03e958 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -108,6 +108,33 @@ function init_cacheval(alg::QRFactorization, A, b, u, Pl, Pr, ArrayInterface.qr_instance(convert(AbstractMatrix, A)) end +## LDLtFactorization + +struct LDLtFactorization{T} <: AbstractFactorization + shift::Float64 + perm::T +end + +function LDLtFactorization(shift = 0.0, perm = nothing) + LDLtFactorization(shift, perm) +end + +function do_factorization(alg::LDLtFactorization, A, b, u) + A = convert(AbstractMatrix, A) + if !(A isa SparseMatrixCSC) + fact = ldlt!(A) + else + fact = ldlt!(A, shift = alg.shift, perm = alg.perm) + end + return fact +end + +function init_cacheval(alg::LDLtFactorization, A, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + ArrayInterface.ldlt_instance(convert(AbstractMatrix, A)) +end + ## SVDFactorization struct SVDFactorization{A} <: AbstractFactorization @@ -381,7 +408,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::UMFPACKFactorization; kwargs. # Caches the symbolic factorization: https://github.com/JuliaLang/julia/pull/33738 if alg.check_pattern && !(SuiteSparse.decrement(SparseArrays.getcolptr(A)) == cache.cacheval.colptr && - SuiteSparse.decrement(SparseArrays.getrowval(A)) == cache.cacheval.rowval) + SuiteSparse.decrement(SparseArrays.getrowval(A)) == get_cacheval(cache,:UMFPACKFactorization).rowval) fact = lu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A))) else @@ -424,7 +451,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::KLUFactorization; kwargs...) if cache.cacheval !== nothing && alg.reuse_symbolic if alg.check_pattern && !(SuiteSparse.decrement(SparseArrays.getcolptr(A)) == cache.cacheval.colptr && - SuiteSparse.decrement(SparseArrays.getrowval(A)) == cache.cacheval.rowval) + SuiteSparse.decrement(SparseArrays.getrowval(A)) == get_cacheval(cache,:KLUFactorization).rowval) fact = KLU.klu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A))) else @@ -473,7 +500,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::RFLUFactorization{P, T}; kwargs...) where {P, T} A = cache.A A = convert(AbstractMatrix, A) - fact, ipiv = cache.cacheval + fact, ipiv = get_cacheval(cache,:RFLUFactorization) if cache.isfresh fact = RecursiveFactorization.lu!(A, ipiv, Val(P), Val(T)) cache.cacheval = (fact, ipiv) @@ -523,7 +550,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::NormalCholeskyFactorization; cache.isfresh = false end if A isa SparseMatrixCSC - cache.u .= cache.cacheval \ (A' * cache.b) + cache.u .= get_cacheval(cache,:NormalCholeskyFactorization) \ (A' * cache.b) y = cache.u else y = ldiv!(cache.u, get_cacheval(cache, :NormalCholeskyFactorization), A' * cache.b) @@ -607,7 +634,7 @@ end function SciMLBase.solve!(cache::LinearCache, alg::FastLUFactorization; kwargs...) A = cache.A A = convert(AbstractMatrix, A) - ws_and_fact = cache.cacheval + ws_and_fact = get_cacheval(cache,:FastLUFactorization) if cache.isfresh # we will fail here if A is a different *size* than in a previous version of the same cache. # it may instead be desirable to resize the workspace. @@ -672,7 +699,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::FastQRFactorization{P}; kwargs...) 
where {P} A = cache.A A = convert(AbstractMatrix, A) - ws_and_fact = cache.cacheval + ws_and_fact = get_cacheval(cache, :FastQRFactorization) if cache.isfresh # we will fail here if A is a different *size* than in a previous version of the same cache. # it may instead be desirable to resize the workspace. From 7568dcf6c5d4a62976812ccb3bab5218f4d2ecb3 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Tue, 30 May 2023 08:46:59 -0700 Subject: [PATCH 05/19] lazy Krylov inits --- src/factorization.jl | 2 +- src/iterative_wrappers.jl | 64 +++++++++++++++++++++++++++------------ 2 files changed, 46 insertions(+), 20 deletions(-) diff --git a/src/factorization.jl b/src/factorization.jl index 49c03e958..5495e4a8e 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -506,7 +506,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::RFLUFactorization{P, T}; cache.cacheval = (fact, ipiv) cache.isfresh = false end - y = ldiv!(cache.u, cache.cacheval[1], cache.b) + y = ldiv!(cache.u, get_cacheval(cache,:RFLUFactorization)[1], cache.b) SciMLBase.build_linear_solution(alg, y, nothing, cache) end diff --git a/src/iterative_wrappers.jl b/src/iterative_wrappers.jl index f72943298..17f0957c1 100644 --- a/src/iterative_wrappers.jl +++ b/src/iterative_wrappers.jl @@ -114,28 +114,54 @@ function get_KrylovJL_solver(KrylovAlg) return KS end +# zeroinit allows for init_cacheval to start by initing with A (0,0) function init_cacheval(alg::KrylovJL, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::Bool, assumptions::OperatorAssumptions; zeroinit = true) KS = get_KrylovJL_solver(alg.KrylovAlg) - memory = (alg.gmres_restart == 0) ? min(20, size(A, 1)) : alg.gmres_restart - - solver = if (alg.KrylovAlg === Krylov.dqgmres! || - alg.KrylovAlg === Krylov.diom! || - alg.KrylovAlg === Krylov.gmres! || - alg.KrylovAlg === Krylov.fgmres! || - alg.KrylovAlg === Krylov.gpmr! || - alg.KrylovAlg === Krylov.fom!) - KS(A, b, memory) - elseif (alg.KrylovAlg === Krylov.minres! || - alg.KrylovAlg === Krylov.symmlq! || - alg.KrylovAlg === Krylov.lslq! || - alg.KrylovAlg === Krylov.lsqr! || - alg.KrylovAlg === Krylov.lsmr!) - (alg.window != 0) ? KS(A, b; window = alg.window) : KS(A, b) + if zeroinit + solver = if (alg.KrylovAlg === Krylov.dqgmres! || + alg.KrylovAlg === Krylov.diom! || + alg.KrylovAlg === Krylov.gmres! || + alg.KrylovAlg === Krylov.fgmres! || + alg.KrylovAlg === Krylov.gpmr! || + alg.KrylovAlg === Krylov.fom!) + if A isa SparseMatrixCSC + KS(SparseMatrixCSC(0,0, [1], Int64[], eltype(A)[]), eltype(b)[], 1) + elseif A isa Matrix + KS(Matrix{eltype(A)}(undef, 0, 0), eltype(b)[], 1) + else + KS(A, b, 1) + end + else + if A isa SparseMatrixCSC + KS(SparseMatrixCSC(0,0, [1], Int64[], eltype(A)[]), eltype(b)[]) + elseif A isa Matrix + KS(Matrix{eltype(A)}(undef, 0, 0), eltype(b)[]) + else + KS(A, b) + end + end else - KS(A, b) - end + memory = (alg.gmres_restart == 0) ? min(20, size(A, 1)) : alg.gmres_restart + + solver = if (alg.KrylovAlg === Krylov.dqgmres! || + alg.KrylovAlg === Krylov.diom! || + alg.KrylovAlg === Krylov.gmres! || + alg.KrylovAlg === Krylov.fgmres! || + alg.KrylovAlg === Krylov.gpmr! || + alg.KrylovAlg === Krylov.fom!) + KS(A, b, memory) + elseif (alg.KrylovAlg === Krylov.minres! || + alg.KrylovAlg === Krylov.symmlq! || + alg.KrylovAlg === Krylov.lslq! || + alg.KrylovAlg === Krylov.lsqr! || + alg.KrylovAlg === Krylov.lsmr!) + (alg.window != 0) ? 
KS(A, b; window = alg.window) : KS(A, b) + else + KS(A, b) + end + end solver.x = u @@ -146,7 +172,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::KrylovJL; kwargs...) if cache.isfresh solver = init_cacheval(alg, cache.A, cache.b, cache.u, cache.Pl, cache.Pr, cache.maxiters, cache.abstol, cache.reltol, cache.verbose, - cache.assumptions) + cache.assumptions, zeroinit = false) cache.cacheval = solver cache.isfresh = false end From 0984819a07ea912580ec3a8a441db5026035ba66 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Tue, 30 May 2023 09:34:32 -0700 Subject: [PATCH 06/19] do a few more preallocation optimizations --- src/common.jl | 11 ++++++++- src/factorization.jl | 56 +++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 65 insertions(+), 2 deletions(-) diff --git a/src/common.jl b/src/common.jl index b8d4d1ce6..97719bffc 100644 --- a/src/common.jl +++ b/src/common.jl @@ -121,11 +121,20 @@ function SciMLBase.init(prob::LinearProblem, alg::SciMLLinearSolveAlgorithm, kwargs...) @unpack A, b, u0, p = prob - A = alias_A ? A : deepcopy(A) + A = if alias_A + A + elseif A isa Array || A isa SparseMatrixCSC + copy(A) + else + deepcopy(A) + end + b = if b isa SparseArrays.AbstractSparseArray && !(A isa Diagonal) Array(b) # the solution to a linear solve will always be dense! elseif alias_b b + elseif b isa Array || b isa SparseMatrixCSC + copy(b) else deepcopy(b) end diff --git a/src/factorization.jl b/src/factorization.jl index 5495e4a8e..15386640b 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -129,6 +129,12 @@ function do_factorization(alg::LDLtFactorization, A, b, u) return fact end +function init_cacheval(alg::LDLtFactorization, A::Union{Nothing,Matrix}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) + nothing +end + function init_cacheval(alg::LDLtFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) @@ -367,6 +373,26 @@ Base.@kwdef struct UMFPACKFactorization <: AbstractFactorization check_pattern::Bool = true # Check factorization re-use end +@static if VERSION < v"1.9.0-DEV.1622" + const PREALLOCATED_UMFPACK = SuiteSparse.UMFPACK.UmfpackLU(C_NULL, C_NULL, 0, 0, + [0], Int64[], Float64[], 0) + finalizer(SuiteSparse.UMFPACK.umfpack_free_symbolic, PREALLOCATED_UMFPACK) +else + const PREALLOCATED_UMFPACK = SuiteSparse.UMFPACK.UmfpackLU(SparseMatrixCSC(0,0, [1], Int64[], Float64[])) +end + +function init_cacheval(alg::UMFPACKFactorization, A::Union{Nothing,Matrix}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) + nothing +end + +function init_cacheval(alg::UMFPACKFactorization, A::SparseMatrixCSC{Float64,Int}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) + PREALLOCATED_UMFPACK +end + function init_cacheval(alg::UMFPACKFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) @@ -404,7 +430,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::UMFPACKFactorization; kwargs. 
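# For the sparse matrices this solve path handles, init_cacheval now always
# supplies a UmfpackLU instance, so the old `cache.cacheval !== nothing`
# guard below is vacuous and `alg.reuse_symbolic` alone controls whether the
# cached symbolic analysis is kept.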
A = cache.A A = convert(AbstractMatrix, A) if cache.isfresh - if cache.cacheval !== nothing && alg.reuse_symbolic + if alg.reuse_symbolic # Caches the symbolic factorization: https://github.com/JuliaLang/julia/pull/33738 if alg.check_pattern && !(SuiteSparse.decrement(SparseArrays.getcolptr(A)) == cache.cacheval.colptr && @@ -432,6 +458,20 @@ Base.@kwdef struct KLUFactorization <: AbstractFactorization check_pattern::Bool = true end +const PREALLOCATED_KLU = KLU.KLUFactorization(SparseMatrixCSC(0,0, [1], Int64[], Float64[])) + +function init_cacheval(alg::KLUFactorization, A::Union{Matrix,Nothing}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) + nothing +end + +function init_cacheval(alg::KLUFactorization, A::SparseMatrixCSC{Float64,Int}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) + PREALLOCATED_KLU +end + function init_cacheval(alg::KLUFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) @@ -728,6 +768,20 @@ Base.@kwdef struct SparspakFactorization <: AbstractFactorization reuse_symbolic::Bool = true end +const PREALLOCATED_SPARSEPAK = sparspaklu(SparseMatrixCSC(0,0, [1], Int64[], Float64[]), factorize = false) + +function init_cacheval(alg::KLUFactorization, A::Union{Matrix,Nothing}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) + nothing +end + +function init_cacheval(::SparspakFactorization, A::SparseMatrixCSC{Float64, Int}, b, u, Pl, Pr, maxiters::Int, abstol, + reltol, + verbose::Bool, assumptions::OperatorAssumptions) + PREALLOCATED_SPARSEPAK +end + function init_cacheval(::SparspakFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) From 07c192500b443240add07abaf6a1ec25a1860d2d Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Tue, 30 May 2023 09:57:43 -0700 Subject: [PATCH 07/19] A bunch of performance improvements --- src/default.jl | 22 +++++++++++++++++++--- src/factorization.jl | 37 ++++++++++++++++++++++++++++++++++++- test/default_algs.jl | 23 ++++++++++++++--------- 3 files changed, 69 insertions(+), 13 deletions(-) diff --git a/src/default.jl b/src/default.jl index 47e8cab6e..e5e62846a 100644 --- a/src/default.jl +++ b/src/default.jl @@ -267,9 +267,25 @@ cache.cacheval = NamedTuple(LUFactorization = cache of LUFactorization, ...) """ @generated function init_cacheval(alg::DefaultLinearSolver, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assump::OperatorAssumptions) - caches = [:(init_cacheval($(algchoice_to_alg(alg)), A, b, u, Pl, Pr, maxiters, abstol, reltol, - verbose, - assump)) for alg in first.(EnumX.symbol_map(DefaultAlgorithmChoice.T))] + caches = map(first.(EnumX.symbol_map(DefaultAlgorithmChoice.T))) do alg + if alg === :KrylovJL_GMRES + quote + if A isa Matrix || A isa SparseMatrixCSC + nothing + else + init_cacheval($(algchoice_to_alg(alg)), A, b, u, Pl, Pr, maxiters, abstol, reltol, + verbose, + assump) + end + end + else + quote + init_cacheval($(algchoice_to_alg(alg)), A, b, u, Pl, Pr, maxiters, abstol, reltol, + verbose, + assump) + end + end + end Expr(:call,:DefaultLinearSolverInit, caches...) 
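# The expression assembled above expands to
#   DefaultLinearSolverInit(init_cacheval(LUFactorization(), A, b, u, ...), ...)
# with one argument per DefaultAlgorithmChoice variant, so every field of the
# returned cache struct has a concrete, statically inferable type, and dense
# or sparse A skips building the Krylov workspace via the `nothing` branch.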
end diff --git a/src/factorization.jl b/src/factorization.jl index 15386640b..9fa293c0b 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -75,6 +75,14 @@ function init_cacheval(alg::Union{LUFactorization, GenericLUFactorization}, A, b ArrayInterface.lu_instance(convert(AbstractMatrix, A)) end +const PREALLOCATED_LU = ArrayInterface.lu_instance(rand(1,1)) + +function init_cacheval(alg::Union{LUFactorization, GenericLUFactorization}, A::Matrix{Float64}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + PREALLOCATED_LU +end + ## QRFactorization struct QRFactorization{P} <: AbstractFactorization @@ -108,6 +116,14 @@ function init_cacheval(alg::QRFactorization, A, b, u, Pl, Pr, ArrayInterface.qr_instance(convert(AbstractMatrix, A)) end +const PREALLOCATED_QR = ArrayInterface.qr_instance(rand(1,1)) + +function init_cacheval(alg::QRFactorization, A::Matrix{Float64}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + PREALLOCATED_QR +end + ## LDLtFactorization struct LDLtFactorization{T} <: AbstractFactorization @@ -162,6 +178,14 @@ function init_cacheval(alg::SVDFactorization, A, b, u, Pl, Pr, ArrayInterface.svd_instance(convert(AbstractMatrix, A)) end +const PREALLOCATED_SVD = ArrayInterface.svd_instance(rand(1,1)) + +function init_cacheval(alg::SVDFactorization, A::Matrix{Float64}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + PREALLOCATED_SVD +end + ## GenericFactorization struct GenericFactorization{F} <: AbstractFactorization @@ -536,6 +560,17 @@ function init_cacheval(alg::RFLUFactorization, A, b, u, Pl, Pr, maxiters::Int, ArrayInterface.lu_instance(convert(AbstractMatrix, A)), ipiv end +function init_cacheval(alg::RFLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)) + PREALLOCATED_LU, ipiv +end + +function init_cacheval(alg::RFLUFactorization, A::AbstractSparseArray, b, u, Pl, Pr, maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + nothing,nothing +end + function SciMLBase.solve!(cache::LinearCache, alg::RFLUFactorization{P, T}; kwargs...) 
where {P, T} A = cache.A @@ -770,7 +805,7 @@ end const PREALLOCATED_SPARSEPAK = sparspaklu(SparseMatrixCSC(0,0, [1], Int64[], Float64[]), factorize = false) -function init_cacheval(alg::KLUFactorization, A::Union{Matrix,Nothing}, b, u, Pl, Pr, +function init_cacheval(alg::SparspakFactorization, A::Union{Matrix,Nothing}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) nothing diff --git a/test/default_algs.jl b/test/default_algs.jl index becb9b07d..c1d0b6321 100644 --- a/test/default_algs.jl +++ b/test/default_algs.jl @@ -1,13 +1,18 @@ using LinearSolve, LinearAlgebra, SparseArrays, Test -@test LinearSolve.defaultalg(nothing, zeros(3)) isa GenericLUFactorization -@test LinearSolve.defaultalg(nothing, zeros(50)) isa RFLUFactorization -@test LinearSolve.defaultalg(nothing, zeros(600)) isa LUFactorization -@test LinearSolve.defaultalg(LinearAlgebra.Diagonal(zeros(5)), zeros(5)) isa - DiagonalFactorization +@test LinearSolve.defaultalg(nothing, zeros(3)).alg === DefaultAlgorithmChoice.GenericLUFactorization +@test LinearSolve.defaultalg(nothing, zeros(50)).alg === DefaultAlgorithmChoice.RFLUFactorization +@test LinearSolve.defaultalg(nothing, zeros(600)).alg === DefaultAlgorithmChoice.LUFactorization +@test LinearSolve.defaultalg(LinearAlgebra.Diagonal(zeros(5)), zeros(5)).alg === DefaultAlgorithmChoice.DiagonalFactorization @test LinearSolve.defaultalg(nothing, zeros(5), - LinearSolve.OperatorAssumptions(false)) isa QRFactorization + LinearSolve.OperatorAssumptions(false)).alg === DefaultAlgorithmChoice.QRFactorization -@test LinearSolve.defaultalg(sprand(1000, 1000, 0.01), zeros(1000)) isa KLUFactorization -@test LinearSolve.defaultalg(sprand(11000, 11000, 0.001), zeros(11000)) isa - UMFPACKFactorization +@test LinearSolve.defaultalg(sprand(1000, 1000, 0.01), zeros(1000)).alg === DefaultAlgorithmChoice.KLUFactorization +@test LinearSolve.defaultalg(sprand(11000, 11000, 0.001), zeros(11000)).alg === DefaultAlgorithmChoice.UMFPACKFactorization + +# Test inference +A = rand(4, 4) +b = rand(4) +prob = LinearProblem(A, b) +@inferred solve(prob) +@inferred init(prob, nothing) \ No newline at end of file From 0e9cdbe7f3a4eeea6a7f4fb6cf4af2b3c6d87060 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Tue, 30 May 2023 16:11:42 -0700 Subject: [PATCH 08/19] Get tests passing --- docs/src/solvers/solvers.md | 4 +- ext/LinearSolveHYPREExt.jl | 3 +- src/LinearSolve.jl | 3 +- src/default.jl | 50 ++++++-- src/factorization.jl | 225 +++++++++++++++++++++++++++++++++--- test/basictests.jl | 4 +- test/default_algs.jl | 14 +-- test/resolve.jl | 33 ++++++ 8 files changed, 296 insertions(+), 40 deletions(-) diff --git a/docs/src/solvers/solvers.md b/docs/src/solvers/solvers.md index e3f153ae0..cfe4dbb81 100644 --- a/docs/src/solvers/solvers.md +++ b/docs/src/solvers/solvers.md @@ -89,7 +89,9 @@ customized per-package, details given below describe a subset of important array When `A` is sparse, a similar polyalgorithm is used. For indefinite matrices, the `LDLt` factorization does not use pivoting during the numerical factorization and therefore the procedure can fail even for invertible matrices. - + - CholeskyFactorization + - BunchKaufmanFactorization + - CHOLMODFactorization ### LinearSolve.jl LinearSolve.jl contains some linear solvers built in. 
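The dispatch pattern these patches are built around — a runtime enum stored in one concrete struct, with every branch returning the same result type — can be illustrated in isolation. Below is a minimal, self-contained sketch; the `Choice`, `DefaultSolver`, and `mysolve` names are illustrative only and not part of LinearSolve.jl's API:

    using EnumX, LinearAlgebra

    EnumX.@enumx Choice begin
        LU
        QR
    end

    struct DefaultSolver
        alg::Choice.T  # runtime enum value inside a concrete wrapper type
    end

    # The branch is taken at runtime, but both arms return a Vector of the
    # same element type, so callers of `mysolve` remain fully inferable.
    function mysolve(s::DefaultSolver, A::Matrix{Float64}, b::Vector{Float64})
        if s.alg === Choice.LU
            lu(A) \ b
        else
            qr(A) \ b
        end
    end

    A = [2.0 0.0; 0.0 4.0]; b = [1.0, 2.0]
    mysolve(DefaultSolver(Choice.LU), A, b)  # == [0.5, 0.5]

The hunks above and below additionally thread the enum through @generated init_cacheval and solve! bodies, so each branch calls a statically known algorithm even though the choice itself is a runtime value.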
diff --git a/ext/LinearSolveHYPREExt.jl b/ext/LinearSolveHYPREExt.jl index ab6e086b2..20bc044ec 100644 --- a/ext/LinearSolveHYPREExt.jl +++ b/ext/LinearSolveHYPREExt.jl @@ -88,8 +88,7 @@ function SciMLBase.init(prob::LinearProblem, alg::HYPREAlgorithm, cache = LinearCache{ typeof(A), typeof(b), typeof(u0), typeof(p), typeof(alg), Tc, - typeof(Pl), typeof(Pr), typeof(reltol), __issquare(assumptions), - __conditioning(assumptions) + typeof(Pl), typeof(Pr), typeof(reltol), typeof(__issquare(assumptions)) }(A, b, u0, p, alg, cacheval, isfresh, Pl, Pr, abstol, reltol, maxiters, verbose, assumptions) diff --git a/src/LinearSolve.jl b/src/LinearSolve.jl index 2be83aba1..351709673 100644 --- a/src/LinearSolve.jl +++ b/src/LinearSolve.jl @@ -111,7 +111,8 @@ export LUFactorization, SVDFactorization, QRFactorization, GenericFactorization, GenericLUFactorization, SimpleLUFactorization, RFLUFactorization, NormalCholeskyFactorization, NormalBunchKaufmanFactorization, UMFPACKFactorization, KLUFactorization, FastLUFactorization, FastQRFactorization, - SparspakFactorization, DiagonalFactorization + SparspakFactorization, DiagonalFactorization, CholeskyFactorization, + BunchKaufmanFactorization, CHOLMODFactorization, LDLtFactorization export LinearSolveFunction, DirectLdiv! diff --git a/src/default.jl b/src/default.jl index e5e62846a..d0991630f 100644 --- a/src/default.jl +++ b/src/default.jl @@ -11,14 +11,18 @@ EnumX.@enumx DefaultAlgorithmChoice begin RowMaximumGenericLUFactorization RFLUFactorization LDLtFactorization + BunchKaufmanFactorization + CHOLMODFactorization SVDFactorization + CholeskyFactorization + NormalCholeskyFactorization end struct DefaultLinearSolver <: SciMLLinearSolveAlgorithm alg::DefaultAlgorithmChoice.T end -mutable struct DefaultLinearSolverInit{T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13} +mutable struct DefaultLinearSolverInit{T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17} LUFactorization::T1 QRFactorization::T2 DiagonalFactorization::T3 @@ -31,7 +35,11 @@ mutable struct DefaultLinearSolverInit{T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T1 RowMaximumGenericLUFactorization::T10 RFLUFactorization::T11 LDLtFactorization::T12 - SVDFactorization::T13 + BunchKaufmanFactorization::T13 + CHOLMODFactorization::T14 + SVDFactorization::T15 + CholeskyFactorization::T16 + NormalCholeskyFactorization::T17 end # Legacy fallback @@ -48,7 +56,7 @@ function defaultalg(A, b, assump::OperatorAssumptions{Nothing}) DefaultLinearSolver(defaultalg(A, b, OperatorAssumptions(issq, assump.condition))) end -function defaultalg(A::Tridiagonal, b, assump::OperatorAssumptions{true}) +function defaultalg(A::Tridiagonal, b, assump::OperatorAssumptions) if assump.issq DefaultLinearSolver(DefaultAlgorithmChoice.LUFactorization) else @@ -69,8 +77,20 @@ function defaultalg(A::Diagonal, b, ::OperatorAssumptions) DefaultLinearSolver(DefaultAlgorithmChoice.DiagonalFactorization) end +function defaultalg(A::Hermitian, b, ::OperatorAssumptions) + DefaultLinearSolver(DefaultAlgorithmChoice.CholeskyFactorization) +end + +function defaultalg(A::Symmetric{<:Number,<:Array}, b, ::OperatorAssumptions) + DefaultLinearSolver(DefaultAlgorithmChoice.BunchKaufmanFactorization) +end + +function defaultalg(A::Symmetric{<:Number,<:SparseMatrixCSC}, b, ::OperatorAssumptions) + DefaultLinearSolver(DefaultAlgorithmChoice.CHOLMODFactorization) +end + function defaultalg(A::AbstractSparseMatrixCSC{Tv, Ti}, b, - assump::OperatorAssumptions{true}) where {Tv, Ti} + assump::OperatorAssumptions) where {Tv, Ti} if assump.issq 
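# Sparspak acts as the generic-eltype sparse LU fallback here; the
# Float64/ComplexF64 specialization below swaps in KLU for smaller systems
# and UMFPACK for larger ones.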
DefaultLinearSolver(DefaultAlgorithmChoice.SparspakFactorization) else @@ -80,7 +100,7 @@ end @static if INCLUDE_SPARSE function defaultalg(A::AbstractSparseMatrixCSC{<:Union{Float64, ComplexF64}, Ti}, b, - assump::OperatorAssumptions{true}) where {Ti} + assump::OperatorAssumptions) where {Ti} if assump.issq if length(b) <= 10_000 DefaultLinearSolver(DefaultAlgorithmChoice.KLUFactorization) @@ -88,7 +108,7 @@ end DefaultLinearSolver(DefaultAlgorithmChoice.UMFPACKFactorization) end else - error("Non-square sparse factorization case not handled") + DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) end end end @@ -149,11 +169,11 @@ end # Allows A === nothing as a stand-in for dense matrix function defaultalg(A, b, assump::OperatorAssumptions) - if assump.issq + alg = if assump.issq # Special case on Arrays: avoid BLAS for RecursiveFactorization.jl when # it makes sense according to the benchmarks, which is dependent on # whether MKL or OpenBLAS is being used - alg = if (A === nothing && !(b isa GPUArraysCore.AbstractGPUArray)) || A isa Matrix + if (A === nothing && !(b isa GPUArraysCore.AbstractGPUArray)) || A isa Matrix if (A === nothing || eltype(A) <: Union{Float32, Float64, ComplexF32, ComplexF64}) && ArrayInterface.can_setindex(b) && (__conditioning(assump) === OperatorCondition.IllConditioned || @@ -188,7 +208,7 @@ function defaultalg(A, b, assump::OperatorAssumptions) # This catches the cases where a factorization overload could exist # For example, BlockBandedMatrix elseif A !== nothing && ArrayInterface.isstructured(A) - DefaultAlgorithmChoice.GenericFactorization + error("Special factorization not handled in current default algorithm") # Not factorizable operator, default to only using A*x else @@ -202,6 +222,8 @@ function defaultalg(A, b, assump::OperatorAssumptions) DefaultAlgorithmChoice.QRFactorization elseif assump.condition === OperatorCondition.SuperIllConditioned DefaultAlgorithmChoice.SVDFactorization + else + error("Special factorization not handled in current default algorithm") end DefaultLinearSolver(alg) end @@ -233,6 +255,14 @@ function algchoice_to_alg(alg::Symbol) GenericLUFactorization() elseif alg === :RFLUFactorization RFLUFactorization() + elseif alg === :BunchKaufmanFactorization + BunchKaufmanFactorization() + elseif alg === :CHOLMODFactorization + CHOLMODFactorization() + elseif alg === :CholeskyFactorization + CholeskyFactorization() + elseif alg === :NormalCholeskyFactorization + NormalCholeskyFactorization() else error("Algorithm choice symbol $alg not allowed in the default") end @@ -316,7 +346,7 @@ end end end -defaultalg_symbol(::Type{T}) where T = Symbol(SciMLBase.parameterless_type(T)) +defaultalg_symbol(::Type{T}) where T = Symbol(split(string(SciMLBase.parameterless_type(T)), ".")[end]) defaultalg_symbol(::Type{<:GenericLUFactorization{LinearAlgebra.RowMaximum}}) = :RowMaximumGenericLUFactorization defaultalg_symbol(::Type{<:GenericFactorization{typeof(ldlt!)}}) = :LDLtFactorization diff --git a/src/factorization.jl b/src/factorization.jl index 9fa293c0b..18ed4c1e4 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -14,7 +14,17 @@ end cache.isfresh = false end y = _ldiv!(cache.u, get_cacheval(cache, $(Meta.quot(defaultalg_symbol(alg)))), cache.b) + + #= + retcode = if LinearAlgebra.issuccess(fact) + SciMLBase.ReturnCode.Success + else + SciMLBase.ReturnCode.Failure + end + SciMLBase.build_linear_solution(alg, y, nothing, cache; retcode = retcode) + =# SciMLBase.build_linear_solution(alg, y, nothing, cache) + end end @@ -83,6 
+93,12 @@ function init_cacheval(alg::Union{LUFactorization, GenericLUFactorization}, A::M PREALLOCATED_LU end +function init_cacheval(alg::Union{LUFactorization, GenericLUFactorization}, A::AbstractSciMLOperator, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + nothing +end + ## QRFactorization struct QRFactorization{P} <: AbstractFactorization @@ -124,6 +140,76 @@ function init_cacheval(alg::QRFactorization, A::Matrix{Float64}, b, u, Pl, Pr, PREALLOCATED_QR end +function init_cacheval(alg::QRFactorization, A::AbstractSciMLOperator, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + nothing +end + +## CholeskyFactorization + +struct CholeskyFactorization{P,P2} <: AbstractFactorization + pivot::P + tol::Int + shift::Float64 + perm::P2 +end + +function CholeskyFactorization(;pivot = nothing, tol=0.0, shift = 0.0, perm = nothing) + if pivot === nothing + pivot = @static if VERSION < v"1.7beta" + Val(false) + else + NoPivot() + end + end + CholeskyFactorization(pivot, 16, shift, perm) +end + +function do_factorization(alg::CholeskyFactorization, A, b, u) + A = convert(AbstractMatrix, A) + if A isa SparseMatrixCSC + fact = cholesky!(A; shift = alg.shift, check = false, perm = alg.perm) + elseif alg.pivot === Val(false) || alg.pivot === NoPivot() + fact = cholesky!(A, alg.pivot; check=false) + else + fact = cholesky!(A, alg.pivot; tol=alg.tol, check=false) + end + return fact +end + +function init_cacheval(alg::CholeskyFactorization, A, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + ArrayInterface.cholesky_instance(convert(AbstractMatrix, A), alg.pivot) +end + +@static if VERSION < v"1.7beta" + cholpivot = Val(false) +else + cholpivot = NoPivot() +end + +const PREALLOCATED_CHOLESKY = ArrayInterface.cholesky_instance(rand(1,1), cholpivot) + +function init_cacheval(alg::CholeskyFactorization, A::Matrix{Float64}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + PREALLOCATED_CHOLESKY +end + +function init_cacheval(alg::CholeskyFactorization, A::Diagonal, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + nothing +end + +function init_cacheval(alg::CholeskyFactorization, A::AbstractSciMLOperator, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + nothing +end + ## LDLtFactorization struct LDLtFactorization{T} <: AbstractFactorization @@ -145,13 +231,13 @@ function do_factorization(alg::LDLtFactorization, A, b, u) return fact end -function init_cacheval(alg::LDLtFactorization, A::Union{Nothing,Matrix}, b, u, Pl, Pr, +function init_cacheval(alg::LDLtFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) nothing end -function init_cacheval(alg::LDLtFactorization, A, b, u, Pl, Pr, +function init_cacheval(alg::LDLtFactorization, A::SymTridiagonal, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.ldlt_instance(convert(AbstractMatrix, A)) @@ -172,7 +258,7 @@ function do_factorization(alg::SVDFactorization, A, b, u) return fact end -function init_cacheval(alg::SVDFactorization, A, b, u, Pl, Pr, +function init_cacheval(alg::SVDFactorization, A::Matrix, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(convert(AbstractMatrix, 
A)) @@ -186,6 +272,44 @@ function init_cacheval(alg::SVDFactorization, A::Matrix{Float64}, b, u, Pl, Pr, PREALLOCATED_SVD end +function init_cacheval(alg::SVDFactorization, A, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + nothing +end + +## BunchKaufmanFactorization + +Base.@kwdef struct BunchKaufmanFactorization <: AbstractFactorization + rook::Bool = false +end + +function do_factorization(alg::BunchKaufmanFactorization, A, b, u) + A = convert(AbstractMatrix, A) + fact = bunchkaufman!(A, alg.rook; check = false) + return fact +end + +function init_cacheval(alg::BunchKaufmanFactorization, A::Symmetric{<:Number,<:Matrix}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + ArrayInterface.bunchkaufman_instance(convert(AbstractMatrix, A)) +end + +const PREALLOCATED_BUNCHKAUFMAN = ArrayInterface.bunchkaufman_instance(Symmetric(rand(1,1))) + +function init_cacheval(alg::BunchKaufmanFactorization, A::Symmetric{Float64,Matrix{Float64}}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + PREALLOCATED_BUNCHKAUFMAN +end + +function init_cacheval(alg::BunchKaufmanFactorization, A, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + nothing +end + ## GenericFactorization struct GenericFactorization{F} <: AbstractFactorization @@ -405,7 +529,7 @@ else const PREALLOCATED_UMFPACK = SuiteSparse.UMFPACK.UmfpackLU(SparseMatrixCSC(0,0, [1], Int64[], Float64[])) end -function init_cacheval(alg::UMFPACKFactorization, A::Union{Nothing,Matrix}, b, u, Pl, Pr, +function init_cacheval(alg::UMFPACKFactorization, A::Union{Nothing,Matrix,AbstractSciMLOperator}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) nothing @@ -484,7 +608,7 @@ end const PREALLOCATED_KLU = KLU.KLUFactorization(SparseMatrixCSC(0,0, [1], Int64[], Float64[])) -function init_cacheval(alg::KLUFactorization, A::Union{Matrix,Nothing}, b, u, Pl, Pr, +function init_cacheval(alg::KLUFactorization, A::Union{Matrix,Nothing,AbstractSciMLOperator}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) nothing @@ -511,22 +635,24 @@ end function SciMLBase.solve!(cache::LinearCache, alg::KLUFactorization; kwargs...) A = cache.A A = convert(AbstractMatrix, A) + if cache.isfresh - if cache.cacheval !== nothing && alg.reuse_symbolic + cacheval = get_cacheval(cache,:KLUFactorization) + if cacheval !== nothing && alg.reuse_symbolic if alg.check_pattern && !(SuiteSparse.decrement(SparseArrays.getcolptr(A)) == - cache.cacheval.colptr && - SuiteSparse.decrement(SparseArrays.getrowval(A)) == get_cacheval(cache,:KLUFactorization).rowval) + cacheval.colptr && + SuiteSparse.decrement(SparseArrays.getrowval(A)) == cacheval.rowval) fact = KLU.klu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A))) else # If we have a cacheval already, run umfpack_symbolic to ensure the symbolic factorization exists # This won't recompute if it does. - KLU.klu_analyze!(cache.cacheval) + KLU.klu_analyze!(cacheval) copyto!(cache.cacheval.nzval, nonzeros(A)) if cache.cacheval._numeric === C_NULL # We MUST have a numeric factorization for reuse, unlike UMFPACK. 
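# (KLU.klu_analyze! above recomputes only the missing symbolic analysis, and
# KLU.klu_factor! below fills in the numeric factors, so the KLU.klu!
# refactorization always has a complete factorization to reuse.)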
- KLU.klu_factor!(cache.cacheval) + KLU.klu_factor!(cacheval) end - fact = KLU.klu!(get_cacheval(cache, :KLUFactorization), + fact = KLU.klu!(cacheval, SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A))) end @@ -544,6 +670,51 @@ function SciMLBase.solve!(cache::LinearCache, alg::KLUFactorization; kwargs...) SciMLBase.build_linear_solution(alg, y, nothing, cache) end +## CHOLMODFactorization + +Base.@kwdef struct CHOLMODFactorization{T} <: AbstractFactorization + shift::Float64 = 0.0 + perm::T = nothing +end + +const PREALLOCATED_CHOLMOD = cholesky(SparseMatrixCSC(0,0, [1], Int64[], Float64[])) + +function init_cacheval(alg::CHOLMODFactorization, A::Union{Matrix,Nothing,AbstractSciMLOperator}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) + nothing +end + +function init_cacheval(alg::CHOLMODFactorization, A::SparseMatrixCSC{Float64,Int}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) + PREALLOCATED_CHOLMOD +end + +function init_cacheval(alg::CHOLMODFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, + reltol, + verbose::Bool, assumptions::OperatorAssumptions) + cholesky(SparseMatrixCSC(0,0, [1], Int64[], eltype(A)[])) +end + +function SciMLBase.solve!(cache::LinearCache, alg::CHOLMODFactorization; kwargs...) + A = cache.A + A = convert(AbstractMatrix, A) + + if cache.isfresh + cacheval = get_cacheval(cache,:CHOLMODFactorization) + fact = cholesky(A; check = false) + if !LinearAlgebra.issuccess(fact) + ldlt!(fact, A; check = false) + end + cache.cacheval = fact + cache.isfresh = false + end + + cache.u .= get_cacheval(cache, :CHOLMODFactorization) \ cache.b + SciMLBase.build_linear_solution(alg, cache.u, nothing, cache) +end + ## RFLUFactorization struct RFLUFactorization{P, T} <: AbstractFactorization @@ -566,7 +737,7 @@ function init_cacheval(alg::RFLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, PREALLOCATED_LU, ipiv end -function init_cacheval(alg::RFLUFactorization, A::AbstractSparseArray, b, u, Pl, Pr, maxiters::Int, +function init_cacheval(alg::RFLUFactorization, A::Union{AbstractSparseArray,AbstractSciMLOperator}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) nothing,nothing end @@ -605,21 +776,41 @@ end default_alias_A(::NormalCholeskyFactorization, ::Any, ::Any) = true default_alias_b(::NormalCholeskyFactorization, ::Any, ::Any) = true +@static if VERSION < v"1.7beta" + normcholpivot = Val(false) +else + normcholpivot = RowMaximum() +end + +const PREALLOCATED_NORMALCHOLESKY = ArrayInterface.cholesky_instance(rand(1,1), normcholpivot) + +function init_cacheval(alg::NormalCholeskyFactorization, A::Union{AbstractSparseArray,Symmetric{<:Number,<:AbstractSparseArray}}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + ArrayInterface.cholesky_instance(convert(AbstractMatrix, A)) +end + function init_cacheval(alg::NormalCholeskyFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) ArrayInterface.cholesky_instance(convert(AbstractMatrix, A), alg.pivot) end +function init_cacheval(alg::NormalCholeskyFactorization, A::Union{Diagonal,AbstractSciMLOperator}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + nothing +end + function SciMLBase.solve!(cache::LinearCache, 
alg::NormalCholeskyFactorization; kwargs...) A = cache.A A = convert(AbstractMatrix, A) if cache.isfresh if A isa SparseMatrixCSC - fact = cholesky(Symmetric((A)' * A)) + fact = cholesky(Symmetric((A)' * A, :L)) else - fact = cholesky(Symmetric((A)' * A), alg.pivot) + fact = cholesky(Symmetric((A)' * A, :L), alg.pivot) end cache.cacheval = fact cache.isfresh = false @@ -805,7 +996,7 @@ end const PREALLOCATED_SPARSEPAK = sparspaklu(SparseMatrixCSC(0,0, [1], Int64[], Float64[]), factorize = false) -function init_cacheval(alg::SparspakFactorization, A::Union{Matrix,Nothing}, b, u, Pl, Pr, +function init_cacheval(alg::SparspakFactorization, A::Union{Matrix,Nothing,AbstractSciMLOperator}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) nothing diff --git a/test/basictests.jl b/test/basictests.jl index 4e241b12c..8aa6fe460 100644 --- a/test/basictests.jl +++ b/test/basictests.jl @@ -405,8 +405,8 @@ end prob1 = LinearProblem(op1, b1; u0 = x1) prob2 = LinearProblem(op2, b2; u0 = x2) - @test LinearSolve.defaultalg(op1, x1) isa DirectLdiv! - @test LinearSolve.defaultalg(op2, x2) isa DirectLdiv! + @test LinearSolve.defaultalg(op1, x1).alg === LinearSolve.DefaultAlgorithmChoice.DirectLdiv! + @test LinearSolve.defaultalg(op2, x2).alg === LinearSolve.DefaultAlgorithmChoice.DirectLdiv! test_interface(DirectLdiv!(), prob1, prob2) test_interface(nothing, prob1, prob2) diff --git a/test/default_algs.jl b/test/default_algs.jl index c1d0b6321..1b25d17a2 100644 --- a/test/default_algs.jl +++ b/test/default_algs.jl @@ -1,14 +1,14 @@ using LinearSolve, LinearAlgebra, SparseArrays, Test -@test LinearSolve.defaultalg(nothing, zeros(3)).alg === DefaultAlgorithmChoice.GenericLUFactorization -@test LinearSolve.defaultalg(nothing, zeros(50)).alg === DefaultAlgorithmChoice.RFLUFactorization -@test LinearSolve.defaultalg(nothing, zeros(600)).alg === DefaultAlgorithmChoice.LUFactorization -@test LinearSolve.defaultalg(LinearAlgebra.Diagonal(zeros(5)), zeros(5)).alg === DefaultAlgorithmChoice.DiagonalFactorization +@test LinearSolve.defaultalg(nothing, zeros(3)).alg === LinearSolve.DefaultAlgorithmChoice.RowMaximumGenericLUFactorization +@test LinearSolve.defaultalg(nothing, zeros(50)).alg === LinearSolve.DefaultAlgorithmChoice.RFLUFactorization +@test LinearSolve.defaultalg(nothing, zeros(600)).alg === LinearSolve.DefaultAlgorithmChoice.RowMaximumGenericLUFactorization +@test LinearSolve.defaultalg(LinearAlgebra.Diagonal(zeros(5)), zeros(5)).alg === LinearSolve.DefaultAlgorithmChoice.DiagonalFactorization @test LinearSolve.defaultalg(nothing, zeros(5), - LinearSolve.OperatorAssumptions(false)).alg === DefaultAlgorithmChoice.QRFactorization + LinearSolve.OperatorAssumptions(false)).alg === LinearSolve.DefaultAlgorithmChoice.QRFactorization -@test LinearSolve.defaultalg(sprand(1000, 1000, 0.01), zeros(1000)).alg === DefaultAlgorithmChoice.KLUFactorization -@test LinearSolve.defaultalg(sprand(11000, 11000, 0.001), zeros(11000)).alg === DefaultAlgorithmChoice.UMFPACKFactorization +@test LinearSolve.defaultalg(sprand(1000, 1000, 0.01), zeros(1000)).alg === LinearSolve.DefaultAlgorithmChoice.KLUFactorization +@test LinearSolve.defaultalg(sprand(11000, 11000, 0.001), zeros(11000)).alg === LinearSolve.DefaultAlgorithmChoice.UMFPACKFactorization # Test inference A = rand(4, 4) diff --git a/test/resolve.jl b/test/resolve.jl index c0c9de889..05a0490c5 100644 --- a/test/resolve.jl +++ b/test/resolve.jl @@ -7,16 +7,24 @@ for alg in subtypes(LinearSolve.AbstractFactorization) alg in 
[KLUFactorization, UMFPACKFactorization, SparspakFactorization] && (A = sparse(A)) A = A' * A + @show A + alg in [CHOLMODFactorization] && (A = sparse(Symmetric(A,:L))) + alg in [BunchKaufmanFactorization] && (A = Symmetric(A,:L)) + alg in [LDLtFactorization] && (A = SymTridiagonal(A)) b = [1.0, 2.0] prob = LinearProblem(A, b) linsolve = init(prob, alg(), alias_A = false, alias_b = false) @test solve!(linsolve).u ≈ [-2.0, 1.5] @test !linsolve.isfresh @test solve!(linsolve).u ≈ [-2.0, 1.5] + A = [1.0 2.0; 3.0 4.0] alg in [KLUFactorization, UMFPACKFactorization, SparspakFactorization] && (A = sparse(A)) A = A' * A + alg in [CHOLMODFactorization] && (A = sparse(Symmetric(A,:L))) + alg in [BunchKaufmanFactorization] && (A = Symmetric(A,:L)) + alg in [LDLtFactorization] && (A = SymTridiagonal(A)) linsolve.A = A @test linsolve.isfresh @test solve!(linsolve).u ≈ [-2.0, 1.5] @@ -32,3 +40,28 @@ linsolve = init(prob, DiagonalFactorization(), alias_A = false, alias_b = false) A = Diagonal([1.0, 4.0]) linsolve.A = A @test solve!(linsolve).u ≈ [1.0, 0.5] + +A = Symmetric([1.0 2.0 + 2.0 1.0]) +b = [1.0, 2.0] +prob = LinearProblem(A, b) +linsolve = init(prob, BunchKaufmanFactorization(), alias_A = false, alias_b = false) +@test solve!(linsolve).u ≈ [1.0, 0.0] +@test solve!(linsolve).u ≈ [1.0, 0.0] +A = Symmetric([1.0 2.0 + 2.0 1.0]) +linsolve.A = A +@test solve!(linsolve).u ≈ [1.0, 0.0] + +A = Symmetric([1.0 2.0 + 2.0 1.0]) +b = [1.0, 2.0] +prob = LinearProblem(A, b) +linsolve = init(prob, CholeskyFactorization(), alias_A = false, alias_b = false) +@test solve!(linsolve).u ≈ [1.0, 0.0] +@test solve!(linsolve).u ≈ [1.0, 0.0] +A = Symmetric([1.0 2.0 + 2.0 1.0]) +b = [1.0, 2.0] +@test solve!(linsolve).u ≈ [1.0, 0.0] + From 44cc6803a9c1173cf5f38c5e96fef6ee3a55c392 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Tue, 30 May 2023 18:01:17 -0700 Subject: [PATCH 09/19] format --- docs/src/solvers/solvers.md | 1 + ext/LinearSolveHYPREExt.jl | 3 +- src/common.jl | 6 +- src/default.jl | 84 +++++++------ src/factorization.jl | 234 ++++++++++++++++++++---------------- src/iterative_wrappers.jl | 16 +-- test/basictests.jl | 6 +- test/default_algs.jl | 23 ++-- test/resolve.jl | 9 +- 9 files changed, 217 insertions(+), 165 deletions(-) diff --git a/docs/src/solvers/solvers.md b/docs/src/solvers/solvers.md index cfe4dbb81..c40b1b182 100644 --- a/docs/src/solvers/solvers.md +++ b/docs/src/solvers/solvers.md @@ -92,6 +92,7 @@ customized per-package, details given below describe a subset of important array - CholeskyFactorization - BunchKaufmanFactorization - CHOLMODFactorization + ### LinearSolve.jl LinearSolve.jl contains some linear solvers built in. diff --git a/ext/LinearSolveHYPREExt.jl b/ext/LinearSolveHYPREExt.jl index 20bc044ec..8005e6131 100644 --- a/ext/LinearSolveHYPREExt.jl +++ b/ext/LinearSolveHYPREExt.jl @@ -88,7 +88,8 @@ function SciMLBase.init(prob::LinearProblem, alg::HYPREAlgorithm, cache = LinearCache{ typeof(A), typeof(b), typeof(u0), typeof(p), typeof(alg), Tc, - typeof(Pl), typeof(Pr), typeof(reltol), typeof(__issquare(assumptions)) + typeof(Pl), typeof(Pr), typeof(reltol), + typeof(__issquare(assumptions)) }(A, b, u0, p, alg, cacheval, isfresh, Pl, Pr, abstol, reltol, maxiters, verbose, assumptions) diff --git a/src/common.jl b/src/common.jl index 97719bffc..777960346 100644 --- a/src/common.jl +++ b/src/common.jl @@ -121,8 +121,8 @@ function SciMLBase.init(prob::LinearProblem, alg::SciMLLinearSolveAlgorithm, kwargs...) 
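# A shallow `copy` suffices for the Array and SparseMatrixCSC branches below,
# whose numeric buffers are duplicated outright; `deepcopy` remains the
# conservative fallback for arbitrary wrapper types.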
@unpack A, b, u0, p = prob - A = if alias_A - A + A = if alias_A + A elseif A isa Array || A isa SparseMatrixCSC copy(A) else @@ -161,7 +161,7 @@ function SciMLBase.init(prob::LinearProblem, alg::SciMLLinearSolveAlgorithm, typeof(Pl), typeof(Pr), typeof(reltol), - typeof(assumptions.issq), + typeof(assumptions.issq) }(A, b, u0, diff --git a/src/default.jl b/src/default.jl index d0991630f..0ca1476a3 100644 --- a/src/default.jl +++ b/src/default.jl @@ -22,7 +22,8 @@ struct DefaultLinearSolver <: SciMLLinearSolveAlgorithm alg::DefaultAlgorithmChoice.T end -mutable struct DefaultLinearSolverInit{T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17} +mutable struct DefaultLinearSolverInit{T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, + T13, T14, T15, T16, T17} LUFactorization::T1 QRFactorization::T2 DiagonalFactorization::T3 @@ -81,11 +82,11 @@ function defaultalg(A::Hermitian, b, ::OperatorAssumptions) DefaultLinearSolver(DefaultAlgorithmChoice.CholeskyFactorization) end -function defaultalg(A::Symmetric{<:Number,<:Array}, b, ::OperatorAssumptions) +function defaultalg(A::Symmetric{<:Number, <:Array}, b, ::OperatorAssumptions) DefaultLinearSolver(DefaultAlgorithmChoice.BunchKaufmanFactorization) end -function defaultalg(A::Symmetric{<:Number,<:SparseMatrixCSC}, b, ::OperatorAssumptions) +function defaultalg(A::Symmetric{<:Number, <:SparseMatrixCSC}, b, ::OperatorAssumptions) DefaultLinearSolver(DefaultAlgorithmChoice.CHOLMODFactorization) end @@ -122,7 +123,7 @@ function defaultalg(A::GPUArraysCore.AbstractGPUArray, b, assump::OperatorAssump else DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) end - end + end end # A === nothing case @@ -135,11 +136,12 @@ function defaultalg(A, b::GPUArraysCore.AbstractGPUArray, assump::OperatorAssump else DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) end - end + end end # Ambiguity handling -function defaultalg(A::GPUArraysCore.AbstractGPUArray, b::GPUArraysCore.AbstractGPUArray, assump::OperatorAssumptions) +function defaultalg(A::GPUArraysCore.AbstractGPUArray, b::GPUArraysCore.AbstractGPUArray, + assump::OperatorAssumptions) if assump.condition === OperatorConodition.IllConditioned || !assump.issq DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) else @@ -148,7 +150,7 @@ function defaultalg(A::GPUArraysCore.AbstractGPUArray, b::GPUArraysCore.Abstract else DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) end - end + end end function defaultalg(A::SciMLBase.AbstractSciMLOperator, b, @@ -163,7 +165,7 @@ function defaultalg(A::SciMLBase.AbstractSciMLOperator, b, DefaultLinearSolver(DefaultAlgorithmChoice.KrylovJL_LSMR) end else - DefaultLinearSolver( DefaultAlgorithmChoice.KrylovJL_GMRES) + DefaultLinearSolver(DefaultAlgorithmChoice.KrylovJL_GMRES) end end @@ -174,18 +176,19 @@ function defaultalg(A, b, assump::OperatorAssumptions) # it makes sense according to the benchmarks, which is dependent on # whether MKL or OpenBLAS is being used if (A === nothing && !(b isa GPUArraysCore.AbstractGPUArray)) || A isa Matrix - if (A === nothing || eltype(A) <: Union{Float32, Float64, ComplexF32, ComplexF64}) && - ArrayInterface.can_setindex(b) && - (__conditioning(assump) === OperatorCondition.IllConditioned || + if (A === nothing || + eltype(A) <: Union{Float32, Float64, ComplexF32, ComplexF64}) && + ArrayInterface.can_setindex(b) && + (__conditioning(assump) === OperatorCondition.IllConditioned || __conditioning(assump) === OperatorCondition.WellConditioned) if length(b) <= 10 if __conditioning(assump) === 
OperatorCondition.IllConditioned DefaultAlgorithmChoice.RowMaximumGenericLUFactorization else DefaultAlgorithmChoice.GenericLUFactorization - end + end elseif (length(b) <= 100 || (isopenblas() && length(b) <= 500)) && - (A === nothing ? eltype(b) <: Union{Float32, Float64} : + (A === nothing ? eltype(b) <: Union{Float32, Float64} : eltype(A) <: Union{Float32, Float64}) DefaultAlgorithmChoice.RFLUFactorization #elseif A === nothing || A isa Matrix @@ -295,28 +298,31 @@ end """ cache.cacheval = NamedTuple(LUFactorization = cache of LUFactorization, ...) """ -@generated function init_cacheval(alg::DefaultLinearSolver, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assump::OperatorAssumptions) +@generated function init_cacheval(alg::DefaultLinearSolver, A, b, u, Pl, Pr, maxiters::Int, + abstol, reltol, + verbose::Bool, assump::OperatorAssumptions) caches = map(first.(EnumX.symbol_map(DefaultAlgorithmChoice.T))) do alg if alg === :KrylovJL_GMRES quote if A isa Matrix || A isa SparseMatrixCSC nothing else - init_cacheval($(algchoice_to_alg(alg)), A, b, u, Pl, Pr, maxiters, abstol, reltol, - verbose, - assump) + init_cacheval($(algchoice_to_alg(alg)), A, b, u, Pl, Pr, maxiters, + abstol, reltol, + verbose, + assump) end end else quote - init_cacheval($(algchoice_to_alg(alg)), A, b, u, Pl, Pr, maxiters, abstol, reltol, - verbose, - assump) + init_cacheval($(algchoice_to_alg(alg)), A, b, u, Pl, Pr, maxiters, abstol, + reltol, + verbose, + assump) end end end - Expr(:call,:DefaultLinearSolverInit, caches...) + Expr(:call, :DefaultLinearSolverInit, caches...) end """ @@ -330,12 +336,14 @@ end ex = :() for alg in first.(EnumX.symbol_map(DefaultAlgorithmChoice.T)) ex = if ex == :() - Expr(:elseif, :(algsym === $(Meta.quot(alg))),:(getfield(cache.cacheval,$(Meta.quot(alg))))) + Expr(:elseif, :(algsym === $(Meta.quot(alg))), + :(getfield(cache.cacheval, $(Meta.quot(alg))))) else - Expr(:elseif, :(algsym === $(Meta.quot(alg))),:(getfield(cache.cacheval, $(Meta.quot(alg)))),ex) + Expr(:elseif, :(algsym === $(Meta.quot(alg))), + :(getfield(cache.cacheval, $(Meta.quot(alg)))), ex) end end - ex = Expr(:if,ex.args...) + ex = Expr(:if, ex.args...) quote if cache.alg isa DefaultLinearSolver @@ -346,8 +354,12 @@ end end end -defaultalg_symbol(::Type{T}) where T = Symbol(split(string(SciMLBase.parameterless_type(T)), ".")[end]) -defaultalg_symbol(::Type{<:GenericLUFactorization{LinearAlgebra.RowMaximum}}) = :RowMaximumGenericLUFactorization +function defaultalg_symbol(::Type{T}) where {T} + Symbol(split(string(SciMLBase.parameterless_type(T)), ".")[end]) +end +function defaultalg_symbol(::Type{<:GenericLUFactorization{LinearAlgebra.RowMaximum}}) + :RowMaximumGenericLUFactorization +end defaultalg_symbol(::Type{<:GenericFactorization{typeof(ldlt!)}}) = :LDLtFactorization """ @@ -358,21 +370,23 @@ else end """ @generated function SciMLBase.solve!(cache::LinearCache, alg::DefaultLinearSolver, - args...; assump::OperatorAssumptions = OperatorAssumptions(), - kwargs...) + args...; + assump::OperatorAssumptions = OperatorAssumptions(), + kwargs...) ex = :() for alg in first.(EnumX.symbol_map(DefaultAlgorithmChoice.T)) newex = quote sol = SciMLBase.solve!(cache, $(algchoice_to_alg(alg)), args...; kwargs...) 
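# Re-wrap the inner solution with the outer DefaultLinearSolver `alg` so
# every branch of the generated `if` returns one concrete solution type.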
SciMLBase.build_linear_solution(alg, sol.u, sol.resid, sol.cache; - retcode = sol.retcode, - iters = sol.iters, stats = sol.stats) + retcode = sol.retcode, + iters = sol.iters, stats = sol.stats) end ex = if ex == :() - Expr(:elseif, :(Symbol(alg.alg) === $(Meta.quot(alg))), newex, :(error("Algorithm Choice not Allowed"))) + Expr(:elseif, :(Symbol(alg.alg) === $(Meta.quot(alg))), newex, + :(error("Algorithm Choice not Allowed"))) else - Expr(:elseif, :(Symbol(alg.alg) === $(Meta.quot(alg))), newex,ex) + Expr(:elseif, :(Symbol(alg.alg) === $(Meta.quot(alg))), newex, ex) end end - ex = Expr(:if,ex.args...) -end \ No newline at end of file + ex = Expr(:if, ex.args...) +end diff --git a/src/factorization.jl b/src/factorization.jl index 18ed4c1e4..cadd1c4a9 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -6,15 +6,17 @@ function _ldiv!(x::Vector, A::Factorization, b::Vector) ldiv!(A, x) end -@generated function SciMLBase.solve!(cache::LinearCache, alg::AbstractFactorization; kwargs...) +@generated function SciMLBase.solve!(cache::LinearCache, alg::AbstractFactorization; + kwargs...) quote if cache.isfresh fact = do_factorization(alg, cache.A, cache.b, cache.u) cache.cacheval = fact cache.isfresh = false end - y = _ldiv!(cache.u, get_cacheval(cache, $(Meta.quot(defaultalg_symbol(alg)))), cache.b) - + y = _ldiv!(cache.u, get_cacheval(cache, $(Meta.quot(defaultalg_symbol(alg)))), + cache.b) + #= retcode = if LinearAlgebra.issuccess(fact) SciMLBase.ReturnCode.Success @@ -24,7 +26,6 @@ end SciMLBase.build_linear_solution(alg, y, nothing, cache; retcode = retcode) =# SciMLBase.build_linear_solution(alg, y, nothing, cache) - end end @@ -85,17 +86,19 @@ function init_cacheval(alg::Union{LUFactorization, GenericLUFactorization}, A, b ArrayInterface.lu_instance(convert(AbstractMatrix, A)) end -const PREALLOCATED_LU = ArrayInterface.lu_instance(rand(1,1)) +const PREALLOCATED_LU = ArrayInterface.lu_instance(rand(1, 1)) -function init_cacheval(alg::Union{LUFactorization, GenericLUFactorization}, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) +function init_cacheval(alg::Union{LUFactorization, GenericLUFactorization}, + A::Matrix{Float64}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) PREALLOCATED_LU end -function init_cacheval(alg::Union{LUFactorization, GenericLUFactorization}, A::AbstractSciMLOperator, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) +function init_cacheval(alg::Union{LUFactorization, GenericLUFactorization}, + A::AbstractSciMLOperator, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) nothing end @@ -132,30 +135,30 @@ function init_cacheval(alg::QRFactorization, A, b, u, Pl, Pr, ArrayInterface.qr_instance(convert(AbstractMatrix, A)) end -const PREALLOCATED_QR = ArrayInterface.qr_instance(rand(1,1)) +const PREALLOCATED_QR = ArrayInterface.qr_instance(rand(1, 1)) function init_cacheval(alg::QRFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) PREALLOCATED_QR end function init_cacheval(alg::QRFactorization, A::AbstractSciMLOperator, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + 
assumptions::OperatorAssumptions) nothing end ## CholeskyFactorization -struct CholeskyFactorization{P,P2} <: AbstractFactorization +struct CholeskyFactorization{P, P2} <: AbstractFactorization pivot::P tol::Int shift::Float64 perm::P2 end -function CholeskyFactorization(;pivot = nothing, tol=0.0, shift = 0.0, perm = nothing) +function CholeskyFactorization(; pivot = nothing, tol = 0.0, shift = 0.0, perm = nothing) if pivot === nothing pivot = @static if VERSION < v"1.7beta" Val(false) @@ -171,9 +174,9 @@ function do_factorization(alg::CholeskyFactorization, A, b, u) if A isa SparseMatrixCSC fact = cholesky!(A; shift = alg.shift, check = false, perm = alg.perm) elseif alg.pivot === Val(false) || alg.pivot === NoPivot() - fact = cholesky!(A, alg.pivot; check=false) + fact = cholesky!(A, alg.pivot; check = false) else - fact = cholesky!(A, alg.pivot; tol=alg.tol, check=false) + fact = cholesky!(A, alg.pivot; tol = alg.tol, check = false) end return fact end @@ -189,24 +192,24 @@ end else cholpivot = NoPivot() end - -const PREALLOCATED_CHOLESKY = ArrayInterface.cholesky_instance(rand(1,1), cholpivot) + +const PREALLOCATED_CHOLESKY = ArrayInterface.cholesky_instance(rand(1, 1), cholpivot) function init_cacheval(alg::CholeskyFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) PREALLOCATED_CHOLESKY end function init_cacheval(alg::CholeskyFactorization, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) nothing end function init_cacheval(alg::CholeskyFactorization, A::AbstractSciMLOperator, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) nothing end @@ -231,9 +234,9 @@ function do_factorization(alg::LDLtFactorization, A, b, u) return fact end -function init_cacheval(alg::LDLtFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) +function init_cacheval(alg::LDLtFactorization, A, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) nothing end @@ -264,17 +267,17 @@ function init_cacheval(alg::SVDFactorization, A::Matrix, b, u, Pl, Pr, ArrayInterface.svd_instance(convert(AbstractMatrix, A)) end -const PREALLOCATED_SVD = ArrayInterface.svd_instance(rand(1,1)) +const PREALLOCATED_SVD = ArrayInterface.svd_instance(rand(1, 1)) function init_cacheval(alg::SVDFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) PREALLOCATED_SVD end function init_cacheval(alg::SVDFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) nothing end @@ -290,23 +293,25 @@ function do_factorization(alg::BunchKaufmanFactorization, A, b, u) return fact end -function init_cacheval(alg::BunchKaufmanFactorization, A::Symmetric{<:Number,<:Matrix}, b, u, Pl, Pr, +function init_cacheval(alg::BunchKaufmanFactorization, A::Symmetric{<:Number, <:Matrix}, b, + u, Pl, Pr, maxiters::Int, abstol, 
reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.bunchkaufman_instance(convert(AbstractMatrix, A)) end -const PREALLOCATED_BUNCHKAUFMAN = ArrayInterface.bunchkaufman_instance(Symmetric(rand(1,1))) +const PREALLOCATED_BUNCHKAUFMAN = ArrayInterface.bunchkaufman_instance(Symmetric(rand(1, 1))) -function init_cacheval(alg::BunchKaufmanFactorization, A::Symmetric{Float64,Matrix{Float64}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) +function init_cacheval(alg::BunchKaufmanFactorization, + A::Symmetric{Float64, Matrix{Float64}}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) PREALLOCATED_BUNCHKAUFMAN end function init_cacheval(alg::BunchKaufmanFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) nothing end @@ -523,21 +528,25 @@ end @static if VERSION < v"1.9.0-DEV.1622" const PREALLOCATED_UMFPACK = SuiteSparse.UMFPACK.UmfpackLU(C_NULL, C_NULL, 0, 0, - [0], Int64[], Float64[], 0) + [0], Int64[], Float64[], 0) finalizer(SuiteSparse.UMFPACK.umfpack_free_symbolic, PREALLOCATED_UMFPACK) else - const PREALLOCATED_UMFPACK = SuiteSparse.UMFPACK.UmfpackLU(SparseMatrixCSC(0,0, [1], Int64[], Float64[])) + const PREALLOCATED_UMFPACK = SuiteSparse.UMFPACK.UmfpackLU(SparseMatrixCSC(0, 0, [1], + Int64[], + Float64[])) end -function init_cacheval(alg::UMFPACKFactorization, A::Union{Nothing,Matrix,AbstractSciMLOperator}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) +function init_cacheval(alg::UMFPACKFactorization, + A::Union{Nothing, Matrix, AbstractSciMLOperator}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) nothing end -function init_cacheval(alg::UMFPACKFactorization, A::SparseMatrixCSC{Float64,Int}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) +function init_cacheval(alg::UMFPACKFactorization, A::SparseMatrixCSC{Float64, Int}, b, u, + Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) PREALLOCATED_UMFPACK end @@ -550,11 +559,12 @@ function init_cacheval(alg::UMFPACKFactorization, A, b, u, Pl, Pr, maxiters::Int zerobased = SparseArrays.getcolptr(A)[1] == 0 @static if VERSION < v"1.9.0-DEV.1622" res = SuiteSparse.UMFPACK.UmfpackLU(C_NULL, C_NULL, size(A, 1), size(A, 2), - zerobased ? copy(SparseArrays.getcolptr(A)) : - SuiteSparse.decrement(SparseArrays.getcolptr(A)), - zerobased ? copy(rowvals(A)) : - SuiteSparse.decrement(rowvals(A)), - copy(nonzeros(A)), 0) + zerobased ? + copy(SparseArrays.getcolptr(A)) : + SuiteSparse.decrement(SparseArrays.getcolptr(A)), + zerobased ? 
copy(rowvals(A)) : + SuiteSparse.decrement(rowvals(A)), + copy(nonzeros(A)), 0) finalizer(SuiteSparse.UMFPACK.umfpack_free_symbolic, res) return res else @@ -565,11 +575,12 @@ function init_cacheval(alg::UMFPACKFactorization, A, b, u, Pl, Pr, maxiters::Int else @static if VERSION < v"1.9.0-DEV.1622" res = SuiteSparse.UMFPACK.UmfpackLU(C_NULL, C_NULL, 0, 0, - [0], Int64[], eltype(A)[], 0) + [0], Int64[], eltype(A)[], 0) finalizer(SuiteSparse.UMFPACK.umfpack_free_symbolic, res) return res else - return SuiteSparse.UMFPACK.UmfpackLU(SparseMatrixCSC(0,0, [1], Int64[], eltype(A)[])) + return SuiteSparse.UMFPACK.UmfpackLU(SparseMatrixCSC(0, 0, [1], Int64[], + eltype(A)[])) end end end @@ -582,7 +593,8 @@ function SciMLBase.solve!(cache::LinearCache, alg::UMFPACKFactorization; kwargs. # Caches the symbolic factorization: https://github.com/JuliaLang/julia/pull/33738 if alg.check_pattern && !(SuiteSparse.decrement(SparseArrays.getcolptr(A)) == cache.cacheval.colptr && - SuiteSparse.decrement(SparseArrays.getrowval(A)) == get_cacheval(cache,:UMFPACKFactorization).rowval) + SuiteSparse.decrement(SparseArrays.getrowval(A)) == + get_cacheval(cache, :UMFPACKFactorization).rowval) fact = lu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A))) else @@ -606,15 +618,18 @@ Base.@kwdef struct KLUFactorization <: AbstractFactorization check_pattern::Bool = true end -const PREALLOCATED_KLU = KLU.KLUFactorization(SparseMatrixCSC(0,0, [1], Int64[], Float64[])) +const PREALLOCATED_KLU = KLU.KLUFactorization(SparseMatrixCSC(0, 0, [1], Int64[], + Float64[])) -function init_cacheval(alg::KLUFactorization, A::Union{Matrix,Nothing,AbstractSciMLOperator}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) +function init_cacheval(alg::KLUFactorization, + A::Union{Matrix, Nothing, AbstractSciMLOperator}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) nothing end -function init_cacheval(alg::KLUFactorization, A::SparseMatrixCSC{Float64,Int}, b, u, Pl, Pr, +function init_cacheval(alg::KLUFactorization, A::SparseMatrixCSC{Float64, Int}, b, u, Pl, + Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) PREALLOCATED_KLU @@ -626,9 +641,9 @@ function init_cacheval(alg::KLUFactorization, A, b, u, Pl, Pr, maxiters::Int, ab A = convert(AbstractMatrix, A) if typeof(A) <: SparseArrays.AbstractSparseArray return KLU.KLUFactorization(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), - nonzeros(A))) + nonzeros(A))) else - return KLU.KLUFactorization(SparseMatrixCSC(0,0, [1], Int64[], eltype(A)[])) + return KLU.KLUFactorization(SparseMatrixCSC(0, 0, [1], Int64[], eltype(A)[])) end end @@ -637,7 +652,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::KLUFactorization; kwargs...) 
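# get_cacheval(cache, :KLUFactorization) unwraps the per-algorithm field of
# DefaultLinearSolverInit when the default solver is in use, and returns
# cache.cacheval unchanged for a directly specified algorithm.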
A = convert(AbstractMatrix, A) if cache.isfresh - cacheval = get_cacheval(cache,:KLUFactorization) + cacheval = get_cacheval(cache, :KLUFactorization) if cacheval !== nothing && alg.reuse_symbolic if alg.check_pattern && !(SuiteSparse.decrement(SparseArrays.getcolptr(A)) == cacheval.colptr && @@ -677,15 +692,17 @@ Base.@kwdef struct CHOLMODFactorization{T} <: AbstractFactorization perm::T = nothing end -const PREALLOCATED_CHOLMOD = cholesky(SparseMatrixCSC(0,0, [1], Int64[], Float64[])) +const PREALLOCATED_CHOLMOD = cholesky(SparseMatrixCSC(0, 0, [1], Int64[], Float64[])) -function init_cacheval(alg::CHOLMODFactorization, A::Union{Matrix,Nothing,AbstractSciMLOperator}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) +function init_cacheval(alg::CHOLMODFactorization, + A::Union{Matrix, Nothing, AbstractSciMLOperator}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) nothing end -function init_cacheval(alg::CHOLMODFactorization, A::SparseMatrixCSC{Float64,Int}, b, u, Pl, Pr, +function init_cacheval(alg::CHOLMODFactorization, A::SparseMatrixCSC{Float64, Int}, b, u, + Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) PREALLOCATED_CHOLMOD @@ -694,7 +711,7 @@ end function init_cacheval(alg::CHOLMODFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) - cholesky(SparseMatrixCSC(0,0, [1], Int64[], eltype(A)[])) + cholesky(SparseMatrixCSC(0, 0, [1], Int64[], eltype(A)[])) end function SciMLBase.solve!(cache::LinearCache, alg::CHOLMODFactorization; kwargs...) @@ -702,7 +719,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::CHOLMODFactorization; kwargs. A = convert(AbstractMatrix, A) if cache.isfresh - cacheval = get_cacheval(cache,:CHOLMODFactorization) + cacheval = get_cacheval(cache, :CHOLMODFactorization) fact = cholesky(A; check = false) if !LinearAlgebra.issuccess(fact) ldlt!(fact, A; check = false) @@ -731,28 +748,31 @@ function init_cacheval(alg::RFLUFactorization, A, b, u, Pl, Pr, maxiters::Int, ArrayInterface.lu_instance(convert(AbstractMatrix, A)), ipiv end -function init_cacheval(alg::RFLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, maxiters::Int, +function init_cacheval(alg::RFLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)) PREALLOCATED_LU, ipiv end -function init_cacheval(alg::RFLUFactorization, A::Union{AbstractSparseArray,AbstractSciMLOperator}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) - nothing,nothing +function init_cacheval(alg::RFLUFactorization, + A::Union{AbstractSparseArray, AbstractSciMLOperator}, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + nothing, nothing end function SciMLBase.solve!(cache::LinearCache, alg::RFLUFactorization{P, T}; kwargs...) 
where {P, T} A = cache.A A = convert(AbstractMatrix, A) - fact, ipiv = get_cacheval(cache,:RFLUFactorization) + fact, ipiv = get_cacheval(cache, :RFLUFactorization) if cache.isfresh fact = RecursiveFactorization.lu!(A, ipiv, Val(P), Val(T)) cache.cacheval = (fact, ipiv) cache.isfresh = false end - y = ldiv!(cache.u, get_cacheval(cache,:RFLUFactorization)[1], cache.b) + y = ldiv!(cache.u, get_cacheval(cache, :RFLUFactorization)[1], cache.b) SciMLBase.build_linear_solution(alg, y, nothing, cache) end @@ -779,26 +799,30 @@ default_alias_b(::NormalCholeskyFactorization, ::Any, ::Any) = true @static if VERSION < v"1.7beta" normcholpivot = Val(false) else - normcholpivot = RowMaximum() + normcholpivot = NoPivot() end -const PREALLOCATED_NORMALCHOLESKY = ArrayInterface.cholesky_instance(rand(1,1), normcholpivot) +const PREALLOCATED_NORMALCHOLESKY = ArrayInterface.cholesky_instance(rand(1, 1), + normcholpivot) -function init_cacheval(alg::NormalCholeskyFactorization, A::Union{AbstractSparseArray,Symmetric{<:Number,<:AbstractSparseArray}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) +function init_cacheval(alg::NormalCholeskyFactorization, + A::Union{AbstractSparseArray, + Symmetric{<:Number, <:AbstractSparseArray}}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) ArrayInterface.cholesky_instance(convert(AbstractMatrix, A)) end function init_cacheval(alg::NormalCholeskyFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) ArrayInterface.cholesky_instance(convert(AbstractMatrix, A), alg.pivot) end -function init_cacheval(alg::NormalCholeskyFactorization, A::Union{Diagonal,AbstractSciMLOperator}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) +function init_cacheval(alg::NormalCholeskyFactorization, + A::Union{Diagonal, AbstractSciMLOperator}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) nothing end @@ -816,7 +840,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::NormalCholeskyFactorization; cache.isfresh = false end if A isa SparseMatrixCSC - cache.u .= get_cacheval(cache,:NormalCholeskyFactorization) \ (A' * cache.b) + cache.u .= get_cacheval(cache, :NormalCholeskyFactorization) \ (A' * cache.b) y = cache.u else y = ldiv!(cache.u, get_cacheval(cache, :NormalCholeskyFactorization), A' * cache.b) @@ -900,7 +924,7 @@ end function SciMLBase.solve!(cache::LinearCache, alg::FastLUFactorization; kwargs...) A = cache.A A = convert(AbstractMatrix, A) - ws_and_fact = get_cacheval(cache,:FastLUFactorization) + ws_and_fact = get_cacheval(cache, :FastLUFactorization) if cache.isfresh # we will fail here if A is a different *size* than in a previous version of the same cache. # it may instead be desirable to resize the workspace. 
@@ -994,17 +1018,20 @@ Base.@kwdef struct SparspakFactorization <: AbstractFactorization reuse_symbolic::Bool = true end -const PREALLOCATED_SPARSEPAK = sparspaklu(SparseMatrixCSC(0,0, [1], Int64[], Float64[]), factorize = false) +const PREALLOCATED_SPARSEPAK = sparspaklu(SparseMatrixCSC(0, 0, [1], Int64[], Float64[]), + factorize = false) -function init_cacheval(alg::SparspakFactorization, A::Union{Matrix,Nothing,AbstractSciMLOperator}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) +function init_cacheval(alg::SparspakFactorization, + A::Union{Matrix, Nothing, AbstractSciMLOperator}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) nothing end -function init_cacheval(::SparspakFactorization, A::SparseMatrixCSC{Float64, Int}, b, u, Pl, Pr, maxiters::Int, abstol, - reltol, - verbose::Bool, assumptions::OperatorAssumptions) +function init_cacheval(::SparspakFactorization, A::SparseMatrixCSC{Float64, Int}, b, u, Pl, + Pr, maxiters::Int, abstol, + reltol, + verbose::Bool, assumptions::OperatorAssumptions) PREALLOCATED_SPARSEPAK end @@ -1013,11 +1040,12 @@ function init_cacheval(::SparspakFactorization, A, b, u, Pl, Pr, maxiters::Int, verbose::Bool, assumptions::OperatorAssumptions) A = convert(AbstractMatrix, A) if typeof(A) <: SparseArrays.AbstractSparseArray - return sparspaklu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A)), - factorize = false) + return sparspaklu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), + nonzeros(A)), + factorize = false) else - return sparspaklu(SparseMatrixCSC(0,0, [1], Int64[], eltype(A)[]), - factorize = false) + return sparspaklu(SparseMatrixCSC(0, 0, [1], Int64[], eltype(A)[]), + factorize = false) end end diff --git a/src/iterative_wrappers.jl b/src/iterative_wrappers.jl index 17f0957c1..fad2d610d 100644 --- a/src/iterative_wrappers.jl +++ b/src/iterative_wrappers.jl @@ -121,13 +121,13 @@ function init_cacheval(alg::KrylovJL, A, b, u, Pl, Pr, maxiters::Int, abstol, re if zeroinit solver = if (alg.KrylovAlg === Krylov.dqgmres! || - alg.KrylovAlg === Krylov.diom! || - alg.KrylovAlg === Krylov.gmres! || - alg.KrylovAlg === Krylov.fgmres! || - alg.KrylovAlg === Krylov.gpmr! || - alg.KrylovAlg === Krylov.fom!) + alg.KrylovAlg === Krylov.diom! || + alg.KrylovAlg === Krylov.gmres! || + alg.KrylovAlg === Krylov.fgmres! || + alg.KrylovAlg === Krylov.gpmr! || + alg.KrylovAlg === Krylov.fom!) if A isa SparseMatrixCSC - KS(SparseMatrixCSC(0,0, [1], Int64[], eltype(A)[]), eltype(b)[], 1) + KS(SparseMatrixCSC(0, 0, [1], Int64[], eltype(A)[]), eltype(b)[], 1) elseif A isa Matrix KS(Matrix{eltype(A)}(undef, 0, 0), eltype(b)[], 1) else @@ -135,7 +135,7 @@ function init_cacheval(alg::KrylovJL, A, b, u, Pl, Pr, maxiters::Int, abstol, re end else if A isa SparseMatrixCSC - KS(SparseMatrixCSC(0,0, [1], Int64[], eltype(A)[]), eltype(b)[]) + KS(SparseMatrixCSC(0, 0, [1], Int64[], eltype(A)[]), eltype(b)[]) elseif A isa Matrix KS(Matrix{eltype(A)}(undef, 0, 0), eltype(b)[]) else @@ -161,7 +161,7 @@ function init_cacheval(alg::KrylovJL, A, b, u, Pl, Pr, maxiters::Int, abstol, re else KS(A, b) end - end + end solver.x = u diff --git a/test/basictests.jl b/test/basictests.jl index 8aa6fe460..5f4530c69 100644 --- a/test/basictests.jl +++ b/test/basictests.jl @@ -405,8 +405,10 @@ end prob1 = LinearProblem(op1, b1; u0 = x1) prob2 = LinearProblem(op2, b2; u0 = x2) - @test LinearSolve.defaultalg(op1, x1).alg === LinearSolve.DefaultAlgorithmChoice.DirectLdiv! 
- @test LinearSolve.defaultalg(op2, x2).alg === LinearSolve.DefaultAlgorithmChoice.DirectLdiv! + @test LinearSolve.defaultalg(op1, x1).alg === + LinearSolve.DefaultAlgorithmChoice.DirectLdiv! + @test LinearSolve.defaultalg(op2, x2).alg === + LinearSolve.DefaultAlgorithmChoice.DirectLdiv! test_interface(DirectLdiv!(), prob1, prob2) test_interface(nothing, prob1, prob2) diff --git a/test/default_algs.jl b/test/default_algs.jl index 1b25d17a2..90d6ef989 100644 --- a/test/default_algs.jl +++ b/test/default_algs.jl @@ -1,18 +1,25 @@ using LinearSolve, LinearAlgebra, SparseArrays, Test -@test LinearSolve.defaultalg(nothing, zeros(3)).alg === LinearSolve.DefaultAlgorithmChoice.RowMaximumGenericLUFactorization -@test LinearSolve.defaultalg(nothing, zeros(50)).alg === LinearSolve.DefaultAlgorithmChoice.RFLUFactorization -@test LinearSolve.defaultalg(nothing, zeros(600)).alg === LinearSolve.DefaultAlgorithmChoice.RowMaximumGenericLUFactorization -@test LinearSolve.defaultalg(LinearAlgebra.Diagonal(zeros(5)), zeros(5)).alg === LinearSolve.DefaultAlgorithmChoice.DiagonalFactorization +@test LinearSolve.defaultalg(nothing, zeros(3)).alg === + LinearSolve.DefaultAlgorithmChoice.RowMaximumGenericLUFactorization +@test LinearSolve.defaultalg(nothing, zeros(50)).alg === + LinearSolve.DefaultAlgorithmChoice.RFLUFactorization +@test LinearSolve.defaultalg(nothing, zeros(600)).alg === + LinearSolve.DefaultAlgorithmChoice.RowMaximumGenericLUFactorization +@test LinearSolve.defaultalg(LinearAlgebra.Diagonal(zeros(5)), zeros(5)).alg === + LinearSolve.DefaultAlgorithmChoice.DiagonalFactorization @test LinearSolve.defaultalg(nothing, zeros(5), - LinearSolve.OperatorAssumptions(false)).alg === LinearSolve.DefaultAlgorithmChoice.QRFactorization + LinearSolve.OperatorAssumptions(false)).alg === + LinearSolve.DefaultAlgorithmChoice.QRFactorization -@test LinearSolve.defaultalg(sprand(1000, 1000, 0.01), zeros(1000)).alg === LinearSolve.DefaultAlgorithmChoice.KLUFactorization -@test LinearSolve.defaultalg(sprand(11000, 11000, 0.001), zeros(11000)).alg === LinearSolve.DefaultAlgorithmChoice.UMFPACKFactorization +@test LinearSolve.defaultalg(sprand(1000, 1000, 0.01), zeros(1000)).alg === + LinearSolve.DefaultAlgorithmChoice.KLUFactorization +@test LinearSolve.defaultalg(sprand(11000, 11000, 0.001), zeros(11000)).alg === + LinearSolve.DefaultAlgorithmChoice.UMFPACKFactorization # Test inference A = rand(4, 4) b = rand(4) prob = LinearProblem(A, b) @inferred solve(prob) -@inferred init(prob, nothing) \ No newline at end of file +@inferred init(prob, nothing) diff --git a/test/resolve.jl b/test/resolve.jl index 05a0490c5..8648aa44d 100644 --- a/test/resolve.jl +++ b/test/resolve.jl @@ -8,8 +8,8 @@ for alg in subtypes(LinearSolve.AbstractFactorization) (A = sparse(A)) A = A' * A @show A - alg in [CHOLMODFactorization] && (A = sparse(Symmetric(A,:L))) - alg in [BunchKaufmanFactorization] && (A = Symmetric(A,:L)) + alg in [CHOLMODFactorization] && (A = sparse(Symmetric(A, :L))) + alg in [BunchKaufmanFactorization] && (A = Symmetric(A, :L)) alg in [LDLtFactorization] && (A = SymTridiagonal(A)) b = [1.0, 2.0] prob = LinearProblem(A, b) @@ -22,8 +22,8 @@ for alg in subtypes(LinearSolve.AbstractFactorization) alg in [KLUFactorization, UMFPACKFactorization, SparspakFactorization] && (A = sparse(A)) A = A' * A - alg in [CHOLMODFactorization] && (A = sparse(Symmetric(A,:L))) - alg in [BunchKaufmanFactorization] && (A = Symmetric(A,:L)) + alg in [CHOLMODFactorization] && (A = sparse(Symmetric(A, :L))) + alg in 
[BunchKaufmanFactorization] && (A = Symmetric(A, :L)) alg in [LDLtFactorization] && (A = SymTridiagonal(A)) linsolve.A = A @test linsolve.isfresh @@ -64,4 +64,3 @@ A = Symmetric([1.0 2.0 2.0 1.0]) b = [1.0, 2.0] @test solve!(linsolve).u ≈ [1.0, 0.0] - From c3ce0a8bac368edee03b85d066e7a3330956fd42 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Tue, 30 May 2023 18:11:04 -0700 Subject: [PATCH 10/19] fix v1.6 --- src/default.jl | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/default.jl b/src/default.jl index 0ca1476a3..559eb4506 100644 --- a/src/default.jl +++ b/src/default.jl @@ -357,8 +357,15 @@ end function defaultalg_symbol(::Type{T}) where {T} Symbol(split(string(SciMLBase.parameterless_type(T)), ".")[end]) end -function defaultalg_symbol(::Type{<:GenericLUFactorization{LinearAlgebra.RowMaximum}}) - :RowMaximumGenericLUFactorization + +@static if VERSION < v"1.7beta" + function defaultalg_symbol(::Type{<:GenericLUFactorization{false}}) + :RowMaximumGenericLUFactorization + end +else + function defaultalg_symbol(::Type{<:GenericLUFactorization{LinearAlgebra.RowMaximum}}) + :RowMaximumGenericLUFactorization + end end defaultalg_symbol(::Type{<:GenericFactorization{typeof(ldlt!)}}) = :LDLtFactorization From 342f519f03dfea8bf03494c23bd261029bd6de0d Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Tue, 30 May 2023 18:22:17 -0700 Subject: [PATCH 11/19] more v1.6 --- Project.toml | 2 +- src/default.jl | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/Project.toml b/Project.toml index 1b7e55195..0e5847b4a 100644 --- a/Project.toml +++ b/Project.toml @@ -27,7 +27,7 @@ SuiteSparse = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9" UnPack = "3a884ed6-31ef-47d7-9d2a-63182c4928ed" [compat] -ArrayInterface = "7.3" +ArrayInterface = "7.4.7" DocStringExtensions = "0.8, 0.9" EnumX = "1" FastLapackInterface = "1" diff --git a/src/default.jl b/src/default.jl index 559eb4506..dc14aba3c 100644 --- a/src/default.jl +++ b/src/default.jl @@ -235,7 +235,11 @@ function algchoice_to_alg(alg::Symbol) if alg === :SVDFactorization SVDFactorization(false, LinearAlgebra.QRIteration()) elseif alg === :RowMaximumGenericLUFactorization - GenericLUFactorization(RowMaximum()) + @static if VERSION < v"1.7beta" + GenericLUFactorization(Val(true)) + else + GenericLUFactorization(RowMaximum()) + end elseif alg === :LDLtFactorization LDLtFactorization() elseif alg === :LUFactorization From 708b3a3ec6de45b1e0d524638e81382de6267027 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Tue, 30 May 2023 18:54:54 -0700 Subject: [PATCH 12/19] fix v1.6, again. 
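
The `Matrix{Float64}` fast path for `RFLUFactorization` now allocates an
empty pivot vector at `init` time rather than sizing it from `A`, which
was breaking on v1.6. Note this means `solve!` ultimately needs a resize
guard before calling `RecursiveFactorization.lu!` — a minimal sketch
(this exact guard lands later in this series; `A` and `ipiv` here are
the values pulled from the cache, and `LinearAlgebra` is assumed loaded):

    if length(ipiv) != min(size(A)...)
        ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...))
    end

Also gates the CHOLMOD testset behind `VERSION > v"1.7-"`, since the
sparse `Symmetric` Cholesky path it exercises is apparently not reliable
on v1.6.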
--- src/factorization.jl | 2 +- test/basictests.jl | 35 +++++++++++++++++++---------------- 2 files changed, 20 insertions(+), 17 deletions(-) diff --git a/src/factorization.jl b/src/factorization.jl index cadd1c4a9..9597aa518 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -751,7 +751,7 @@ end function init_cacheval(alg::RFLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) - ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)) + ipiv = Vector{LinearAlgebra.BlasInt}(undef, 0) PREALLOCATED_LU, ipiv end diff --git a/test/basictests.jl b/test/basictests.jl index 5f4530c69..3049a1623 100644 --- a/test/basictests.jl +++ b/test/basictests.jl @@ -254,22 +254,25 @@ end end end - @testset "CHOLMOD" begin - # Create a posdef symmetric matrix - A = sprand(100, 100, 0.01) - A = A + A' + 100 * I - - # rhs - b = rand(100) - - # Set the problem - prob = LinearProblem(A, b) - sol = solve(prob) - - # Enforce symmetry to use Cholesky, since A is symmetric and posdef - prob2 = LinearProblem(Symmetric(A), b) - sol2 = solve(prob2) - @test abs(norm(A * sol2.u .- b) - norm(A * sol.u .- b)) < 1e-12 + + if VERSION > v"1.7-" + @testset "CHOLMOD" begin + # Create a posdef symmetric matrix + A = sprand(100, 100, 0.01) + A = A + A' + 100 * I + + # rhs + b = rand(100) + + # Set the problem + prob = LinearProblem(A, b) + sol = solve(prob) + + # Enforce symmetry to use Cholesky, since A is symmetric and posdef + prob2 = LinearProblem(Symmetric(A), b) + sol2 = solve(prob2) + @test abs(norm(A * sol2.u .- b) - norm(A * sol.u .- b)) < 1e-12 + end end @testset "Preconditioners" begin From 1cad8d1e576d09b29609e0675ea3a08a769c3bae Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Tue, 30 May 2023 21:01:31 -0700 Subject: [PATCH 13/19] revert ipiv change --- src/factorization.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/factorization.jl b/src/factorization.jl index 9597aa518..cadd1c4a9 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -751,7 +751,7 @@ end function init_cacheval(alg::RFLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) - ipiv = Vector{LinearAlgebra.BlasInt}(undef, 0) + ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)) PREALLOCATED_LU, ipiv end From c9ade13173a56a7b141a081a811356e4347b7bdc Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Tue, 30 May 2023 21:19:40 -0700 Subject: [PATCH 14/19] only on v1.6 --- src/factorization.jl | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/factorization.jl b/src/factorization.jl index cadd1c4a9..078d25d23 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -751,7 +751,11 @@ end function init_cacheval(alg::RFLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) - ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)) + @static if VERSION < v"1.7-" + ipiv = Vector{LinearAlgebra.BlasInt}(undef, 0) + else + ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)) + end PREALLOCATED_LU, ipiv end From a6a2f34adb5e194374445151c2c4624397e617c9 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Wed, 31 May 2023 04:45:05 -0700 Subject: [PATCH 15/19] use preallocated dispatch only on v1.7+ --- src/factorization.jl | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git 
a/src/factorization.jl b/src/factorization.jl index 078d25d23..1080c6899 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -748,15 +748,13 @@ function init_cacheval(alg::RFLUFactorization, A, b, u, Pl, Pr, maxiters::Int, ArrayInterface.lu_instance(convert(AbstractMatrix, A)), ipiv end -function init_cacheval(alg::RFLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) - @static if VERSION < v"1.7-" +@static if VERSION >= v"1.7-" + function init_cacheval(alg::RFLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ipiv = Vector{LinearAlgebra.BlasInt}(undef, 0) - else - ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)) + PREALLOCATED_LU, ipiv end - PREALLOCATED_LU, ipiv end function init_cacheval(alg::RFLUFactorization, From e16eab66288c9ccf9b1e566e0cc27c73e64877e5 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Wed, 31 May 2023 04:50:29 -0700 Subject: [PATCH 16/19] just remove preallocated RFLU --- src/factorization.jl | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/factorization.jl b/src/factorization.jl index 1080c6899..e228464c1 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -748,15 +748,6 @@ function init_cacheval(alg::RFLUFactorization, A, b, u, Pl, Pr, maxiters::Int, ArrayInterface.lu_instance(convert(AbstractMatrix, A)), ipiv end -@static if VERSION >= v"1.7-" - function init_cacheval(alg::RFLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) - ipiv = Vector{LinearAlgebra.BlasInt}(undef, 0) - PREALLOCATED_LU, ipiv - end -end - function init_cacheval(alg::RFLUFactorization, A::Union{AbstractSparseArray, AbstractSciMLOperator}, b, u, Pl, Pr, maxiters::Int, From 0709d5ddee5232ac6c6846a95b6bab59455b720f Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Wed, 31 May 2023 05:22:02 -0700 Subject: [PATCH 17/19] test size --- src/factorization.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/src/factorization.jl b/src/factorization.jl index e228464c1..2fe779696 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -744,6 +744,7 @@ end function init_cacheval(alg::RFLUFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + @show size(A), length(b) ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)) ArrayInterface.lu_instance(convert(AbstractMatrix, A)), ipiv end From 73dfa2caea061aa8f40cc387adb8461fdcc7dcea Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Wed, 31 May 2023 08:29:51 -0700 Subject: [PATCH 18/19] fully fix v1.6 --- src/default.jl | 46 +++++++------------------------ src/factorization.jl | 64 +++++++++++++++++++++++++++++++++++++++++++- test/default_algs.jl | 18 +++++++------ 3 files changed, 83 insertions(+), 45 deletions(-) diff --git a/src/default.jl b/src/default.jl index dc14aba3c..62b523dcc 100644 --- a/src/default.jl +++ b/src/default.jl @@ -8,7 +8,6 @@ EnumX.@enumx DefaultAlgorithmChoice begin UMFPACKFactorization KrylovJL_GMRES GenericLUFactorization - RowMaximumGenericLUFactorization RFLUFactorization LDLtFactorization BunchKaufmanFactorization @@ -23,7 +22,7 @@ struct DefaultLinearSolver <: SciMLLinearSolveAlgorithm end mutable struct DefaultLinearSolverInit{T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, - T13, T14, T15, T16, T17} + T13, T14, T15, T16} 
LUFactorization::T1 QRFactorization::T2 DiagonalFactorization::T3 @@ -33,14 +32,13 @@ mutable struct DefaultLinearSolverInit{T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, UMFPACKFactorization::T7 KrylovJL_GMRES::T8 GenericLUFactorization::T9 - RowMaximumGenericLUFactorization::T10 - RFLUFactorization::T11 - LDLtFactorization::T12 - BunchKaufmanFactorization::T13 - CHOLMODFactorization::T14 - SVDFactorization::T15 - CholeskyFactorization::T16 - NormalCholeskyFactorization::T17 + RFLUFactorization::T10 + LDLtFactorization::T11 + BunchKaufmanFactorization::T12 + CHOLMODFactorization::T13 + SVDFactorization::T14 + CholeskyFactorization::T15 + NormalCholeskyFactorization::T16 end # Legacy fallback @@ -182,11 +180,7 @@ function defaultalg(A, b, assump::OperatorAssumptions) (__conditioning(assump) === OperatorCondition.IllConditioned || __conditioning(assump) === OperatorCondition.WellConditioned) if length(b) <= 10 - if __conditioning(assump) === OperatorCondition.IllConditioned - DefaultAlgorithmChoice.RowMaximumGenericLUFactorization - else - DefaultAlgorithmChoice.GenericLUFactorization - end + DefaultAlgorithmChoice.GenericLUFactorization elseif (length(b) <= 100 || (isopenblas() && length(b) <= 500)) && (A === nothing ? eltype(b) <: Union{Float32, Float64} : eltype(A) <: Union{Float32, Float64}) @@ -194,11 +188,7 @@ function defaultalg(A, b, assump::OperatorAssumptions) #elseif A === nothing || A isa Matrix # alg = FastLUFactorization() else - if __conditioning(assump) === OperatorCondition.IllConditioned - DefaultAlgorithmChoice.RowMaximumGenericLUFactorization - else - DefaultAlgorithmChoice.GenericLUFactorization - end + DefaultAlgorithmChoice.GenericLUFactorization end elseif __conditioning(assump) === OperatorCondition.VeryIllConditioned DefaultAlgorithmChoice.QRFactorization @@ -234,12 +224,6 @@ end function algchoice_to_alg(alg::Symbol) if alg === :SVDFactorization SVDFactorization(false, LinearAlgebra.QRIteration()) - elseif alg === :RowMaximumGenericLUFactorization - @static if VERSION < v"1.7beta" - GenericLUFactorization(Val(true)) - else - GenericLUFactorization(RowMaximum()) - end elseif alg === :LDLtFactorization LDLtFactorization() elseif alg === :LUFactorization @@ -361,16 +345,6 @@ end function defaultalg_symbol(::Type{T}) where {T} Symbol(split(string(SciMLBase.parameterless_type(T)), ".")[end]) end - -@static if VERSION < v"1.7beta" - function defaultalg_symbol(::Type{<:GenericLUFactorization{false}}) - :RowMaximumGenericLUFactorization - end -else - function defaultalg_symbol(::Type{<:GenericLUFactorization{LinearAlgebra.RowMaximum}}) - :RowMaximumGenericLUFactorization - end -end defaultalg_symbol(::Type{<:GenericFactorization{typeof(ldlt!)}}) = :LDLtFactorization """ diff --git a/src/factorization.jl b/src/factorization.jl index 2fe779696..c52181417 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -102,6 +102,15 @@ function init_cacheval(alg::Union{LUFactorization, GenericLUFactorization}, nothing end +@static if VERSION < v"1.7-" + function init_cacheval(alg::Union{LUFactorization, GenericLUFactorization}, + A::Union{Diagonal,SymTridiagonal}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + nothing + end +end + ## QRFactorization struct QRFactorization{P} <: AbstractFactorization @@ -149,6 +158,15 @@ function init_cacheval(alg::QRFactorization, A::AbstractSciMLOperator, b, u, Pl, nothing end +@static if VERSION < v"1.7-" + function init_cacheval(alg::QRFactorization, + 
A::Union{Diagonal,SymTridiagonal,Tridiagonal}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + nothing + end +end + ## CholeskyFactorization struct CholeskyFactorization{P, P2} <: AbstractFactorization @@ -213,6 +231,14 @@ function init_cacheval(alg::CholeskyFactorization, A::AbstractSciMLOperator, b, nothing end +@static if VERSION < v"1.7beta" + function init_cacheval(alg::CholeskyFactorization, A::Union{SymTridiagonal,Tridiagonal}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + nothing + end +end + ## LDLtFactorization struct LDLtFactorization{T} <: AbstractFactorization @@ -281,6 +307,15 @@ function init_cacheval(alg::SVDFactorization, A, b, u, Pl, Pr, nothing end +@static if VERSION < v"1.7-" + function init_cacheval(alg::SVDFactorization, + A::Union{Diagonal,SymTridiagonal,Tridiagonal}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + nothing + end +end + ## BunchKaufmanFactorization Base.@kwdef struct BunchKaufmanFactorization <: AbstractFactorization @@ -744,11 +779,17 @@ end function init_cacheval(alg::RFLUFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) - @show size(A), length(b) ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)) ArrayInterface.lu_instance(convert(AbstractMatrix, A)), ipiv end +function init_cacheval(alg::RFLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + ipiv = Vector{LinearAlgebra.BlasInt}(undef, 0) + PREALLOCATED_LU, ipiv +end + function init_cacheval(alg::RFLUFactorization, A::Union{AbstractSparseArray, AbstractSciMLOperator}, b, u, Pl, Pr, maxiters::Int, @@ -756,12 +797,24 @@ function init_cacheval(alg::RFLUFactorization, nothing, nothing end +@static if VERSION < v"1.7-" + function init_cacheval(alg::RFLUFactorization, + A::Union{Diagonal,SymTridiagonal,Tridiagonal}, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + nothing, nothing + end +end + function SciMLBase.solve!(cache::LinearCache, alg::RFLUFactorization{P, T}; kwargs...) where {P, T} A = cache.A A = convert(AbstractMatrix, A) fact, ipiv = get_cacheval(cache, :RFLUFactorization) if cache.isfresh + if length(ipiv) != min(size(A)...) + ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)) + end fact = RecursiveFactorization.lu!(A, ipiv, Val(P), Val(T)) cache.cacheval = (fact, ipiv) cache.isfresh = false @@ -820,6 +873,15 @@ function init_cacheval(alg::NormalCholeskyFactorization, nothing end +@static if VERSION < v"1.7-" + function init_cacheval(alg::NormalCholeskyFactorization, + A::Union{Tridiagonal, SymTridiagonal}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) + nothing + end +end + function SciMLBase.solve!(cache::LinearCache, alg::NormalCholeskyFactorization; kwargs...) 
A = cache.A diff --git a/test/default_algs.jl b/test/default_algs.jl index 90d6ef989..2c4e39fbd 100644 --- a/test/default_algs.jl +++ b/test/default_algs.jl @@ -1,10 +1,10 @@ using LinearSolve, LinearAlgebra, SparseArrays, Test @test LinearSolve.defaultalg(nothing, zeros(3)).alg === - LinearSolve.DefaultAlgorithmChoice.RowMaximumGenericLUFactorization + LinearSolve.DefaultAlgorithmChoice.GenericLUFactorization @test LinearSolve.defaultalg(nothing, zeros(50)).alg === LinearSolve.DefaultAlgorithmChoice.RFLUFactorization @test LinearSolve.defaultalg(nothing, zeros(600)).alg === - LinearSolve.DefaultAlgorithmChoice.RowMaximumGenericLUFactorization + LinearSolve.DefaultAlgorithmChoice.GenericLUFactorization @test LinearSolve.defaultalg(LinearAlgebra.Diagonal(zeros(5)), zeros(5)).alg === LinearSolve.DefaultAlgorithmChoice.DiagonalFactorization @@ -17,9 +17,11 @@ using LinearSolve, LinearAlgebra, SparseArrays, Test @test LinearSolve.defaultalg(sprand(11000, 11000, 0.001), zeros(11000)).alg === LinearSolve.DefaultAlgorithmChoice.UMFPACKFactorization -# Test inference -A = rand(4, 4) -b = rand(4) -prob = LinearProblem(A, b) -@inferred solve(prob) -@inferred init(prob, nothing) +@static if VERSION >= v"v1.7-" + # Test inference + A = rand(4, 4) + b = rand(4) + prob = LinearProblem(A, b) + @inferred solve(prob) + @inferred init(prob, nothing) +end \ No newline at end of file From 1eccc20236a246eabd99e7059a18c06559b5bfa2 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Wed, 31 May 2023 09:44:29 -0700 Subject: [PATCH 19/19] format --- src/factorization.jl | 39 ++++++++++++++++++++------------------- test/basictests.jl | 1 - test/default_algs.jl | 14 +++++++------- 3 files changed, 27 insertions(+), 27 deletions(-) diff --git a/src/factorization.jl b/src/factorization.jl index c52181417..7e614dac4 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -104,9 +104,9 @@ end @static if VERSION < v"1.7-" function init_cacheval(alg::Union{LUFactorization, GenericLUFactorization}, - A::Union{Diagonal,SymTridiagonal}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + A::Union{Diagonal, SymTridiagonal}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) nothing end end @@ -160,9 +160,9 @@ end @static if VERSION < v"1.7-" function init_cacheval(alg::QRFactorization, - A::Union{Diagonal,SymTridiagonal,Tridiagonal}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + A::Union{Diagonal, SymTridiagonal, Tridiagonal}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) nothing end end @@ -232,9 +232,10 @@ function init_cacheval(alg::CholeskyFactorization, A::AbstractSciMLOperator, b, end @static if VERSION < v"1.7beta" - function init_cacheval(alg::CholeskyFactorization, A::Union{SymTridiagonal,Tridiagonal}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + function init_cacheval(alg::CholeskyFactorization, + A::Union{SymTridiagonal, Tridiagonal}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) nothing end end @@ -309,9 +310,9 @@ end @static if VERSION < v"1.7-" function init_cacheval(alg::SVDFactorization, - A::Union{Diagonal,SymTridiagonal,Tridiagonal}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + A::Union{Diagonal, SymTridiagonal, Tridiagonal}, b, u, Pl, 
Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) nothing end end @@ -787,7 +788,7 @@ function init_cacheval(alg::RFLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ipiv = Vector{LinearAlgebra.BlasInt}(undef, 0) - PREALLOCATED_LU, ipiv + PREALLOCATED_LU, ipiv end function init_cacheval(alg::RFLUFactorization, @@ -799,9 +800,9 @@ end @static if VERSION < v"1.7-" function init_cacheval(alg::RFLUFactorization, - A::Union{Diagonal,SymTridiagonal,Tridiagonal}, b, u, Pl, Pr, - maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + A::Union{Diagonal, SymTridiagonal, Tridiagonal}, b, u, Pl, Pr, + maxiters::Int, + abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) nothing, nothing end end @@ -875,9 +876,9 @@ end @static if VERSION < v"1.7-" function init_cacheval(alg::NormalCholeskyFactorization, - A::Union{Tridiagonal, SymTridiagonal}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, - assumptions::OperatorAssumptions) + A::Union{Tridiagonal, SymTridiagonal}, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, verbose::Bool, + assumptions::OperatorAssumptions) nothing end end diff --git a/test/basictests.jl b/test/basictests.jl index 3049a1623..cdd4d71a0 100644 --- a/test/basictests.jl +++ b/test/basictests.jl @@ -254,7 +254,6 @@ end end end - if VERSION > v"1.7-" @testset "CHOLMOD" begin # Create a posdef symmetric matrix diff --git a/test/default_algs.jl b/test/default_algs.jl index 2c4e39fbd..0d42293f6 100644 --- a/test/default_algs.jl +++ b/test/default_algs.jl @@ -18,10 +18,10 @@ using LinearSolve, LinearAlgebra, SparseArrays, Test LinearSolve.DefaultAlgorithmChoice.UMFPACKFactorization @static if VERSION >= v"v1.7-" - # Test inference - A = rand(4, 4) - b = rand(4) - prob = LinearProblem(A, b) - @inferred solve(prob) - @inferred init(prob, nothing) -end \ No newline at end of file + # Test inference + A = rand(4, 4) + b = rand(4) + prob = LinearProblem(A, b) + @inferred solve(prob) + @inferred init(prob, nothing) +end