macro get_cacheval(cache, algsym)
    quote
        if $(esc(cache)).alg isa DefaultLinearSolver
            getfield($(esc(cache)).cacheval, $algsym)
        else
            $(esc(cache)).cacheval
        end
    end
end
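
# Illustrative sketch (not part of the library API): inside a `solve!` overload,
# `@get_cacheval` picks out the right cached factorization whether the cache was
# built for a single algorithm or for the `DefaultLinearSolver` polyalgorithm:
#
#     fact = @get_cacheval(cache, :LUFactorization)
#
# which expands to (roughly)
#
#     cache.alg isa DefaultLinearSolver ?
#         getfield(cache.cacheval, :LUFactorization) : cache.cacheval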

_ldiv!(x, A, b) = ldiv!(x, A, b)

_ldiv!(x, A, b::SVector) = (x .= A \ b)
_ldiv!(::SVector, A, b::SVector) = (A \ b)
_ldiv!(::SVector, A, b) = (A \ b)
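
# A minimal sketch (assumes StaticArrays is loaded) of why the `SVector` methods
# above return the result instead of mutating: static vectors are immutable, so
# the "in-place" solve is really out-of-place and the caller must use the
# return value. `_ldiv!` is an internal helper, shown here only for illustration:
#
#     using LinearSolve, StaticArrays, LinearAlgebra
#     A = @SMatrix rand(3, 3)
#     b = @SVector rand(3)
#     x = @SVector zeros(3)
#     x = LinearSolve._ldiv!(x, lu(A), b)  # returns a new SVector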

function _ldiv!(x::Vector, A::Factorization, b::Vector)
    # workaround https://github.com/JuliaLang/julia/issues/43507
    # Fallback if working with non-square matrices
    length(x) != length(b) && return ldiv!(x, A, b)
    copyto!(x, b)
    ldiv!(A, x)
end

# RF Bad fallback: will fail if `A` is just a stand-in
# This should instead just create the factorization type.
function init_cacheval(alg::AbstractFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol,
        reltol, verbose::Bool, assumptions::OperatorAssumptions)
    do_factorization(alg, convert(AbstractMatrix, A), b, u)
end

## LU Factorizations

"""
`LUFactorization(pivot = LinearAlgebra.RowMaximum())`

Julia's built-in `lu`. Equivalent to calling `lu!(A)`.

* On dense matrices, this uses the current BLAS implementation of the user's computer,
  which by default is OpenBLAS but will use MKL if the user does `using MKL` in their
  system.
* On sparse matrices, this will use UMFPACK from SparseArrays. Note that this will not
  cache the symbolic factorization.
* On CuMatrix, it will use a CUDA-accelerated LU from CuSolver.
* On BandedMatrix and BlockBandedMatrix, it will use a banded LU.

## Positional Arguments

* pivot: The choice of pivoting. Defaults to `LinearAlgebra.RowMaximum()`. The other
  choice is `LinearAlgebra.NoPivot()`.
"""
struct LUFactorization{P} <: AbstractFactorization
    pivot::P
end
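
# Usage sketch (assumes the standard LinearSolve.jl problem/caching interface;
# illustrative, not part of this file):
#
#     using LinearSolve
#     A = rand(4, 4); b = rand(4)
#     sol = solve(LinearProblem(A, b), LUFactorization())
#
#     # or reuse the factorization across solves with the caching interface:
#     cache = init(LinearProblem(A, b), LUFactorization())
#     sol1 = solve!(cache)
#     cache.b = rand(4)   # new right-hand side, same A: no refactorization
#     sol2 = solve!(cache)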

"""
`GenericLUFactorization(pivot = LinearAlgebra.RowMaximum())`

Julia's built-in generic LU factorization. Equivalent to calling
`LinearAlgebra.generic_lufact!`. Supports arbitrary number types but does not scale
as well as BLAS-based LU implementations. Has low overhead and is good for small matrices.

## Positional Arguments

* pivot: The choice of pivoting. Defaults to `LinearAlgebra.RowMaximum()`. The other
  choice is `LinearAlgebra.NoPivot()`.
"""
struct GenericLUFactorization{P} <: AbstractFactorization
    pivot::P
end

LUFactorization() = LUFactorization(RowMaximum())

GenericLUFactorization() = GenericLUFactorization(RowMaximum())

function do_factorization(alg::LUFactorization, A, b, u)
    A = convert(AbstractMatrix, A)
    if A isa AbstractSparseMatrixCSC
        return lu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A)),
            check = false)
    elseif A isa GPUArraysCore.AnyGPUArray
        fact = lu(A; check = false)
    elseif !ArrayInterface.can_setindex(typeof(A))
        fact = lu(A, alg.pivot, check = false)
    else
        fact = lu!(A, alg.pivot, check = false)
    end
    return fact
end

function do_factorization(alg::GenericLUFactorization, A, b, u)
    A = convert(AbstractMatrix, A)
    fact = LinearAlgebra.generic_lufact!(A, alg.pivot, check = false)
    return fact
end

function init_cacheval(alg::Union{LUFactorization, GenericLUFactorization}, A, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    ArrayInterface.lu_instance(convert(AbstractMatrix, A))
end

function init_cacheval(alg::Union{LUFactorization, GenericLUFactorization},
        A::Union{<:Adjoint, <:Transpose}, b, u, Pl, Pr, maxiters::Int, abstol, reltol,
        verbose::Bool, assumptions::OperatorAssumptions)
    if alg isa LUFactorization
        return lu(A; check = false)
    else
        A isa GPUArraysCore.AnyGPUArray && return nothing
        return LinearAlgebra.generic_lufact!(copy(A), alg.pivot; check = false)
    end
end

const PREALLOCATED_LU = ArrayInterface.lu_instance(rand(1, 1))

function init_cacheval(alg::Union{LUFactorization, GenericLUFactorization},
        A::Matrix{Float64}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    PREALLOCATED_LU
end

function init_cacheval(alg::Union{LUFactorization, GenericLUFactorization},
        A::AbstractSciMLOperator, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    nothing
end

## QRFactorization

"""
`QRFactorization(pivot = LinearAlgebra.NoPivot(), blocksize = 16)`

Julia's built-in `qr`. Equivalent to calling `qr!(A)`.

* On dense matrices, this uses the current BLAS implementation of the user's computer
  which by default is OpenBLAS but will use MKL if the user does `using MKL` in their
  system.
* On sparse matrices, this will use SPQR from SparseArrays.
* On CuMatrix, it will use a CUDA-accelerated QR from CuSolver.
* On BandedMatrix and BlockBandedMatrix, it will use a banded QR.
"""
struct QRFactorization{P} <: AbstractFactorization
    pivot::P
    blocksize::Int
    inplace::Bool
end

QRFactorization(inplace = true) = QRFactorization(NoPivot(), 16, inplace)

function QRFactorization(pivot::LinearAlgebra.PivotingStrategy, inplace::Bool = true)
    QRFactorization(pivot, 16, inplace)
end
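
# Usage sketch (assumes the standard LinearSolve.jl interface): QR also handles
# rectangular least-squares problems, which is what the non-square `_ldiv!`
# fallback above supports:
#
#     using LinearSolve
#     A = rand(10, 4); b = rand(10)    # overdetermined system
#     sol = solve(LinearProblem(A, b), QRFactorization())
#     sol.u                            # least-squares solution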

function do_factorization(alg::QRFactorization, A, b, u)
    A = convert(AbstractMatrix, A)
    if ArrayInterface.can_setindex(typeof(A))
        if alg.inplace && !(A isa SparseMatrixCSC) && !(A isa GPUArraysCore.AnyGPUArray)
            fact = qr!(A, alg.pivot)
        else
            fact = qr(A) # CUDA.jl does not allow other args!
        end
    else
        fact = qr(A, alg.pivot)
    end
    return fact
end

function init_cacheval(alg::QRFactorization, A, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    ArrayInterface.qr_instance(convert(AbstractMatrix, A), alg.pivot)
end

function init_cacheval(alg::QRFactorization, A::Union{<:Adjoint, <:Transpose}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    A isa GPUArraysCore.AnyGPUArray && return qr(A)
    return qr(A, alg.pivot)
end

const PREALLOCATED_QR = ArrayInterface.qr_instance(rand(1, 1))

function init_cacheval(alg::QRFactorization{NoPivot}, A::Matrix{Float64}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    PREALLOCATED_QR
end

function init_cacheval(alg::QRFactorization, A::AbstractSciMLOperator, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    nothing
end
## CholeskyFactorization

"""
`CholeskyFactorization(; pivot = nothing, tol = 0.0, shift = 0.0, perm = nothing)`

Julia's built-in `cholesky`. Equivalent to calling `cholesky!(A)`.

## Keyword Arguments

* pivot: defaults to `NoPivot()`, can also be `RowMaximum()`.
* tol: the tol argument in CHOLMOD. Only used for sparse matrices.
* shift: the shift argument in CHOLMOD. Only used for sparse matrices.
* perm: the perm argument in CHOLMOD. Only used for sparse matrices.
"""
struct CholeskyFactorization{P, P2} <: AbstractFactorization
    pivot::P
    tol::Int
    shift::Float64
    perm::P2
end

function CholeskyFactorization(; pivot = nothing, tol = 0.0, shift = 0.0, perm = nothing)
    pivot === nothing && (pivot = NoPivot())
    CholeskyFactorization(pivot, tol, shift, perm)
end
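
# Usage sketch (assumes the standard LinearSolve.jl interface): Cholesky
# requires a symmetric positive definite A, so wrap the matrix accordingly:
#
#     using LinearSolve, LinearAlgebra
#     B = rand(4, 4)
#     A = Symmetric(B' * B + 4I)    # SPD by construction
#     sol = solve(LinearProblem(A, rand(4)), CholeskyFactorization())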

function do_factorization(alg::CholeskyFactorization, A, b, u)
    A = convert(AbstractMatrix, A)
    if A isa SparseMatrixCSC
        fact = cholesky(A; shift = alg.shift, check = false, perm = alg.perm)
    elseif A isa GPUArraysCore.AnyGPUArray
        fact = cholesky(A; check = false)
    elseif alg.pivot === Val(false) || alg.pivot === NoPivot()
        fact = cholesky!(A, alg.pivot; check = false)
    else
        fact = cholesky!(A, alg.pivot; tol = alg.tol, check = false)
    end
    return fact
end

function init_cacheval(alg::CholeskyFactorization, A::SMatrix{S1, S2}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions) where {S1, S2}
    cholesky(A)
end

function init_cacheval(alg::CholeskyFactorization, A::GPUArraysCore.AnyGPUArray, b, u, Pl,
        Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    cholesky(A; check = false)
end

function init_cacheval(alg::CholeskyFactorization, A, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    ArrayInterface.cholesky_instance(convert(AbstractMatrix, A), alg.pivot)
end

const PREALLOCATED_CHOLESKY = ArrayInterface.cholesky_instance(rand(1, 1), NoPivot())

function init_cacheval(alg::CholeskyFactorization, A::Matrix{Float64}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    PREALLOCATED_CHOLESKY
end

function init_cacheval(alg::CholeskyFactorization,
        A::Union{Diagonal, AbstractSciMLOperator}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    nothing
end

## LDLtFactorization

struct LDLtFactorization{T} <: AbstractFactorization
    shift::Float64
    perm::T
end

function LDLtFactorization(shift = 0.0, perm = nothing)
    LDLtFactorization(shift, perm)
end

function do_factorization(alg::LDLtFactorization, A, b, u)
    A = convert(AbstractMatrix, A)
    if !(A isa SparseMatrixCSC)
        fact = ldlt!(A)
    else
        fact = ldlt!(A, shift = alg.shift, perm = alg.perm)
    end
    return fact
end

function init_cacheval(alg::LDLtFactorization, A, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol,
        verbose::Bool, assumptions::OperatorAssumptions)
    nothing
end

function init_cacheval(alg::LDLtFactorization, A::SymTridiagonal, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    ArrayInterface.ldlt_instance(convert(AbstractMatrix, A))
end

## SVDFactorization

"""
`SVDFactorization(full = false, alg = LinearAlgebra.DivideAndConquer())`

Julia's built-in `svd`. Equivalent to `svd!(A)`.

* On dense matrices, this uses the current BLAS implementation of the user's computer
  which by default is OpenBLAS but will use MKL if the user does `using MKL` in their
  system.
"""
struct SVDFactorization{A} <: AbstractFactorization
    full::Bool
    alg::A
end

SVDFactorization() = SVDFactorization(false, LinearAlgebra.DivideAndConquer())

function do_factorization(alg::SVDFactorization, A, b, u)
    A = convert(AbstractMatrix, A)
    if ArrayInterface.can_setindex(typeof(A))
        fact = svd!(A; alg.full, alg.alg)
    else
        fact = svd(A; alg.full)
    end
    return fact
end

function init_cacheval(alg::SVDFactorization, A::Union{Matrix, SMatrix}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    ArrayInterface.svd_instance(convert(AbstractMatrix, A))
end

const PREALLOCATED_SVD = ArrayInterface.svd_instance(rand(1, 1))

function init_cacheval(alg::SVDFactorization, A::Matrix{Float64}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    PREALLOCATED_SVD
end

function init_cacheval(alg::SVDFactorization, A, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    nothing
end

## BunchKaufmanFactorization

"""
`BunchKaufmanFactorization(; rook = false)`

Julia's built-in `bunchkaufman`. Equivalent to calling `bunchkaufman(A)`.
Only for Symmetric matrices.

## Keyword Arguments

* rook: whether to perform rook pivoting. Defaults to false.
"""
Base.@kwdef struct BunchKaufmanFactorization <: AbstractFactorization
    rook::Bool = false
end
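
# Usage sketch (assumes the standard LinearSolve.jl interface): Bunch-Kaufman
# targets symmetric indefinite matrices, where Cholesky would fail:
#
#     using LinearSolve, LinearAlgebra
#     B = rand(4, 4)
#     A = Symmetric(B + B')    # symmetric, generally indefinite
#     sol = solve(LinearProblem(A, rand(4)), BunchKaufmanFactorization())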

function do_factorization(alg::BunchKaufmanFactorization, A, b, u)
    A = convert(AbstractMatrix, A)
    fact = bunchkaufman!(A, alg.rook; check = false)
    return fact
end

function init_cacheval(alg::BunchKaufmanFactorization, A::Symmetric{<:Number, <:Matrix}, b,
        u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    ArrayInterface.bunchkaufman_instance(convert(AbstractMatrix, A))
end

const PREALLOCATED_BUNCHKAUFMAN = ArrayInterface.bunchkaufman_instance(Symmetric(rand(1, 1)))

function init_cacheval(alg::BunchKaufmanFactorization,
        A::Symmetric{Float64, Matrix{Float64}}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    PREALLOCATED_BUNCHKAUFMAN
end

function init_cacheval(alg::BunchKaufmanFactorization, A, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    nothing
end
## GenericFactorization

"""
`GenericFactorization(; fact_alg = LinearAlgebra.factorize)`: Constructs a linear solver
from a generic factorization algorithm `fact_alg` which complies with the LinearAlgebra
factorization API. Quoting from Base:

* If `A` is upper or lower triangular (or diagonal), no factorization of `A` is
  required. The system is then solved with either forward or backward substitution.
  For non-triangular square matrices, an LU factorization is used.
  For rectangular `A` the result is the minimum-norm least squares solution computed by a
  pivoted QR factorization of `A` and a rank estimate of `A` based on the R factor.
  When `A` is sparse, a similar polyalgorithm is used. For indefinite matrices, the `LDLt`
  factorization does not use pivoting during the numerical factorization and therefore the
  procedure can fail even for invertible matrices.

## Keyword Arguments

* fact_alg: the factorization algorithm to use. Defaults to `LinearAlgebra.factorize`,
  but can be swapped to choices like `lu`, `qr`.
"""
struct GenericFactorization{F} <: AbstractFactorization
    fact_alg::F
end

GenericFactorization(; fact_alg = LinearAlgebra.factorize) = GenericFactorization(fact_alg)
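
# Usage sketch (assumes the standard LinearSolve.jl interface): any function
# returning a LinearAlgebra-style factorization object can be plugged in, e.g.
#
#     using LinearSolve, LinearAlgebra
#     A = rand(4, 4); b = rand(4)
#     sol = solve(LinearProblem(A, b), GenericFactorization(; fact_alg = qr))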

function do_factorization(alg::GenericFactorization, A, b, u)
    A = convert(AbstractMatrix, A)
    fact = alg.fact_alg(A)
    return fact
end

function init_cacheval(alg::GenericFactorization{typeof(lu)}, A::AbstractMatrix, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    ArrayInterface.lu_instance(A)
end
function init_cacheval(alg::GenericFactorization{typeof(lu!)}, A::AbstractMatrix, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    ArrayInterface.lu_instance(A)
end

function init_cacheval(alg::GenericFactorization{typeof(lu)},
        A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    ArrayInterface.lu_instance(A)
end
function init_cacheval(alg::GenericFactorization{typeof(lu!)},
        A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    ArrayInterface.lu_instance(A)
end
function init_cacheval(alg::GenericFactorization{typeof(lu)}, A::Diagonal, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    Diagonal(inv.(A.diag))
end
function init_cacheval(alg::GenericFactorization{typeof(lu)}, A::Tridiagonal, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    ArrayInterface.lu_instance(A)
end
function init_cacheval(alg::GenericFactorization{typeof(lu!)}, A::Diagonal, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    Diagonal(inv.(A.diag))
end
function init_cacheval(alg::GenericFactorization{typeof(lu!)}, A::Tridiagonal, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    ArrayInterface.lu_instance(A)
end
function init_cacheval(alg::GenericFactorization{typeof(lu!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions) where {T, V}
    LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A)
end
function init_cacheval(alg::GenericFactorization{typeof(lu)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions) where {T, V}
    LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A)
end

function init_cacheval(alg::GenericFactorization{typeof(qr)}, A::AbstractMatrix, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    ArrayInterface.qr_instance(A)
end
function init_cacheval(alg::GenericFactorization{typeof(qr!)}, A::AbstractMatrix, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    ArrayInterface.qr_instance(A)
end
function init_cacheval(alg::GenericFactorization{typeof(qr)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions) where {T, V}
    LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A)
end
function init_cacheval(alg::GenericFactorization{typeof(qr!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions) where {T, V}
    LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A)
end

function init_cacheval(alg::GenericFactorization{typeof(qr)},
        A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    ArrayInterface.qr_instance(A)
end
function init_cacheval(alg::GenericFactorization{typeof(qr!)},
        A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    ArrayInterface.qr_instance(A)
end
function init_cacheval(alg::GenericFactorization{typeof(qr)}, A::Diagonal, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    Diagonal(inv.(A.diag))
end
function init_cacheval(alg::GenericFactorization{typeof(qr)}, A::Tridiagonal, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    ArrayInterface.qr_instance(A)
end
function init_cacheval(alg::GenericFactorization{typeof(qr!)}, A::Diagonal, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    Diagonal(inv.(A.diag))
end
function init_cacheval(alg::GenericFactorization{typeof(qr!)}, A::Tridiagonal, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    ArrayInterface.qr_instance(A)
end

function init_cacheval(alg::GenericFactorization{typeof(svd)}, A::AbstractMatrix, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    ArrayInterface.svd_instance(A)
end
function init_cacheval(alg::GenericFactorization{typeof(svd!)}, A::AbstractMatrix, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    ArrayInterface.svd_instance(A)
end

function init_cacheval(alg::GenericFactorization{typeof(svd)},
        A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    ArrayInterface.svd_instance(A)
end
function init_cacheval(alg::GenericFactorization{typeof(svd!)},
        A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    ArrayInterface.svd_instance(A)
end
function init_cacheval(alg::GenericFactorization{typeof(svd)}, A::Diagonal, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    Diagonal(inv.(A.diag))
end
function init_cacheval(alg::GenericFactorization{typeof(svd)}, A::Tridiagonal, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    ArrayInterface.svd_instance(A)
end
function init_cacheval(alg::GenericFactorization{typeof(svd!)}, A::Diagonal, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    Diagonal(inv.(A.diag))
end
function init_cacheval(alg::GenericFactorization{typeof(svd!)}, A::Tridiagonal, b, u, Pl,
        Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    ArrayInterface.svd_instance(A)
end
function init_cacheval(alg::GenericFactorization{typeof(svd!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions) where {T, V}
    LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A)
end
function init_cacheval(alg::GenericFactorization{typeof(svd)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions) where {T, V}
    LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A)
end

function init_cacheval(alg::GenericFactorization, A::Diagonal, b, u, Pl, Pr, maxiters::Int,
        abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    Diagonal(inv.(A.diag))
end
function init_cacheval(alg::GenericFactorization, A::Tridiagonal, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    ArrayInterface.lu_instance(A)
end
function init_cacheval(alg::GenericFactorization, A::SymTridiagonal{T, V}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions) where {T, V}
    LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A)
end
function init_cacheval(alg::GenericFactorization, A, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    init_cacheval(alg, convert(AbstractMatrix, A), b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
end
function init_cacheval(alg::GenericFactorization, A::AbstractMatrix, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    do_factorization(alg, A, b, u)
end

function init_cacheval(alg::Union{GenericFactorization{typeof(bunchkaufman!)},
            GenericFactorization{typeof(bunchkaufman)}},
        A::Union{Hermitian, Symmetric}, b, u, Pl, Pr, maxiters::Int, abstol,
        reltol, verbose::Bool, assumptions::OperatorAssumptions)
    BunchKaufman(A.data, Array(1:size(A, 1)), A.uplo, true, false, 0)
end

function init_cacheval(alg::Union{GenericFactorization{typeof(bunchkaufman!)},
            GenericFactorization{typeof(bunchkaufman)}},
        A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    if eltype(A) <: Complex
        return bunchkaufman!(Hermitian(A))
    else
        return bunchkaufman!(Symmetric(A))
    end
end

# Fallback, tries to make nonsingular and just factorizes
# Try to never use it.

# Cholesky needs the posdef matrix, for GenericFactorization assume structure is needed
function init_cacheval(alg::GenericFactorization{typeof(cholesky)}, A::AbstractMatrix, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    newA = copy(convert(AbstractMatrix, A))
    do_factorization(alg, newA, b, u)
end
function init_cacheval(alg::GenericFactorization{typeof(cholesky!)}, A::AbstractMatrix, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    newA = copy(convert(AbstractMatrix, A))
    do_factorization(alg, newA, b, u)
end
function init_cacheval(alg::GenericFactorization{typeof(cholesky!)}, A::Diagonal, b, u, Pl, Pr, maxiters::Int,
        abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    Diagonal(inv.(A.diag))
end
function init_cacheval(alg::GenericFactorization{typeof(cholesky!)}, A::Tridiagonal, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    ArrayInterface.lu_instance(A)
end
function init_cacheval(alg::GenericFactorization{typeof(cholesky!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions) where {T, V}
    LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A)
end
function init_cacheval(alg::GenericFactorization{typeof(cholesky)}, A::Diagonal, b, u, Pl, Pr, maxiters::Int,
        abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    Diagonal(inv.(A.diag))
end
function init_cacheval(alg::GenericFactorization{typeof(cholesky)}, A::Tridiagonal, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    ArrayInterface.lu_instance(A)
end
function init_cacheval(alg::GenericFactorization{typeof(cholesky)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions) where {T, V}
    LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A)
end

function init_cacheval(alg::GenericFactorization,
        A::Union{Hermitian{T, <:SparseMatrixCSC},
            Symmetric{T, <:SparseMatrixCSC}}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions) where {T}
    newA = copy(convert(AbstractMatrix, A))
    do_factorization(alg, newA, b, u)
end

# Ambiguity handling dispatch
################################## Factorizations which require solve! overloads

"""
`UMFPACKFactorization(; reuse_symbolic = true, check_pattern = true)`

A fast sparse multithreaded LU-factorization which specializes on sparsity
patterns with “more structure”.

!!! note

    By default, the implementation caches the symbolic factorization for efficiency.
    I.e., if `set_A` is used, it is expected that the new `A` has the same sparsity
    pattern as the previous `A`. If this algorithm is to be used in a context where
    that assumption does not hold, set `reuse_symbolic = false`.
"""
Base.@kwdef struct UMFPACKFactorization <: AbstractFactorization
    reuse_symbolic::Bool = true
    check_pattern::Bool = true # Check factorization re-use
end
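
# Usage sketch (assumes the standard LinearSolve.jl caching interface):
# refactorizing a new matrix with the same sparsity pattern reuses the
# symbolic analysis:
#
#     using LinearSolve, SparseArrays, LinearAlgebra
#     A = sprand(100, 100, 0.05) + 100I
#     cache = init(LinearProblem(A, rand(100)), UMFPACKFactorization())
#     sol1 = solve!(cache)
#     cache.A = A + 100I   # same pattern, new values: symbolic reuse
#     sol2 = solve!(cache)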

const PREALLOCATED_UMFPACK = SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC(0, 0, [1],
    Int[], Float64[]))

function init_cacheval(alg::UMFPACKFactorization,
        A, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol,
        verbose::Bool, assumptions::OperatorAssumptions)
    nothing
end

function init_cacheval(alg::UMFPACKFactorization, A::SparseMatrixCSC{Float64, Int}, b, u,
        Pl, Pr,
        maxiters::Int, abstol, reltol,
        verbose::Bool, assumptions::OperatorAssumptions)
    PREALLOCATED_UMFPACK
end

function init_cacheval(alg::UMFPACKFactorization, A::AbstractSparseArray, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol,
        verbose::Bool, assumptions::OperatorAssumptions)
    A = convert(AbstractMatrix, A)
    return SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC(size(A)..., getcolptr(A),
        rowvals(A), nonzeros(A)))
end

function SciMLBase.solve!(cache::LinearCache, alg::UMFPACKFactorization; kwargs...)
    A = cache.A
    A = convert(AbstractMatrix, A)
    if cache.isfresh
        cacheval = @get_cacheval(cache, :UMFPACKFactorization)
        if alg.reuse_symbolic
            # Caches the symbolic factorization: https://github.com/JuliaLang/julia/pull/33738
            if alg.check_pattern && !(SparseArrays.decrement(SparseArrays.getcolptr(A)) ==
                 cacheval.colptr &&
                 SparseArrays.decrement(SparseArrays.getrowval(A)) ==
                 cacheval.rowval)
                fact = lu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A),
                    nonzeros(A)))
            else
                fact = lu!(cacheval,
                    SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A),
                        nonzeros(A)))
            end
        else
            fact = lu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A)))
        end
        cache.cacheval = fact
        cache.isfresh = false
    end

    y = ldiv!(cache.u, @get_cacheval(cache, :UMFPACKFactorization), cache.b)
    SciMLBase.build_linear_solution(alg, y, nothing, cache)
end

"""
`KLUFactorization(; reuse_symbolic = true, check_pattern = true)`

A fast sparse LU-factorization which specializes on sparsity patterns with “less structure”.

!!! note

    By default, the implementation caches the symbolic factorization for efficiency.
    I.e., if `set_A` is used, it is expected that the new `A` has the same sparsity
    pattern as the previous `A`. If this algorithm is to be used in a context where
    that assumption does not hold, set `reuse_symbolic = false`.
"""
Base.@kwdef struct KLUFactorization <: AbstractFactorization
    reuse_symbolic::Bool = true
    check_pattern::Bool = true
end
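
# Usage sketch (assumes the standard LinearSolve.jl interface): KLU shines on
# scattered, circuit-like sparsity; when the pattern is known to be fixed, the
# pattern check can be skipped:
#
#     using LinearSolve, SparseArrays, LinearAlgebra
#     A = sprand(200, 200, 0.01) + 5I
#     alg = KLUFactorization(; check_pattern = false)
#     sol = solve(LinearProblem(A, rand(200)), alg)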

const PREALLOCATED_KLU = KLU.KLUFactorization(SparseMatrixCSC(0, 0, [1], Int[],
    Float64[]))

function init_cacheval(alg::KLUFactorization,
        A, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol,
        verbose::Bool, assumptions::OperatorAssumptions)
    nothing
end

function init_cacheval(alg::KLUFactorization, A::SparseMatrixCSC{Float64, Int}, b, u, Pl,
        Pr,
        maxiters::Int, abstol, reltol,
        verbose::Bool, assumptions::OperatorAssumptions)
    PREALLOCATED_KLU
end

function init_cacheval(alg::KLUFactorization, A::AbstractSparseArray, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol,
        verbose::Bool, assumptions::OperatorAssumptions)
    A = convert(AbstractMatrix, A)
    return KLU.KLUFactorization(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A),
        nonzeros(A)))
end

function SciMLBase.solve!(cache::LinearCache, alg::KLUFactorization; kwargs...)
    A = cache.A
    A = convert(AbstractMatrix, A)

    if cache.isfresh
        cacheval = @get_cacheval(cache, :KLUFactorization)
        if cacheval !== nothing && alg.reuse_symbolic
            if alg.check_pattern && !(SparseArrays.decrement(SparseArrays.getcolptr(A)) ==
                 cacheval.colptr &&
                 SparseArrays.decrement(SparseArrays.getrowval(A)) == cacheval.rowval)
                fact = KLU.klu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A),
                    nonzeros(A)))
            else
                # If we have a cacheval already, run klu_analyze! to ensure the symbolic
                # factorization exists. This won't recompute if it does.
                KLU.klu_analyze!(cacheval)
                copyto!(cacheval.nzval, nonzeros(A))
                if cacheval._numeric === C_NULL # We MUST have a numeric factorization for reuse, unlike UMFPACK.
                    KLU.klu_factor!(cacheval)
                end
                fact = KLU.klu!(cacheval,
                    SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A),
                        nonzeros(A)))
            end
        else
            # New fact each time since the sparsity pattern can change
            # and thus it needs to reallocate
            fact = KLU.klu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A),
                nonzeros(A)))
        end
        cache.cacheval = fact
        cache.isfresh = false
    end

    y = ldiv!(cache.u, @get_cacheval(cache, :KLUFactorization), cache.b)
    SciMLBase.build_linear_solution(alg, y, nothing, cache)
end

## CHOLMODFactorization

"""
`CHOLMODFactorization(; shift = 0.0, perm = nothing)`

A wrapper of CHOLMOD's polyalgorithm, mixing a Cholesky factorization and LDLt.
It tries Cholesky for performance and retries LDLt if conditioning causes
Cholesky to fail.

Only supports sparse matrices.

## Keyword Arguments

* shift: the shift argument in CHOLMOD.
* perm: the perm argument in CHOLMOD.
"""
Base.@kwdef struct CHOLMODFactorization{T} <: AbstractFactorization
    shift::Float64 = 0.0
    perm::T = nothing
end
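
# Usage sketch (assumes the standard LinearSolve.jl interface): build a sparse
# symmetric positive definite matrix and let CHOLMOD pick Cholesky vs. LDLt:
#
#     using LinearSolve, SparseArrays, LinearAlgebra
#     B = sprand(100, 100, 0.02)
#     A = B + B' + 100I   # sparse, symmetric, positive definite
#     sol = solve(LinearProblem(A, rand(100)), CHOLMODFactorization())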

const PREALLOCATED_CHOLMOD = cholesky(SparseMatrixCSC(0, 0, [1], Int[], Float64[]))

function init_cacheval(alg::CHOLMODFactorization,
        A, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol,
        verbose::Bool, assumptions::OperatorAssumptions)
    nothing
end

function init_cacheval(alg::CHOLMODFactorization,
        A::Union{SparseMatrixCSC{T, Int}, Symmetric{T, SparseMatrixCSC{T, Int}}}, b, u,
        Pl, Pr,
        maxiters::Int, abstol, reltol,
        verbose::Bool, assumptions::OperatorAssumptions) where {T <: Union{Float32, Float64}}
    PREALLOCATED_CHOLMOD
end

function SciMLBase.solve!(cache::LinearCache, alg::CHOLMODFactorization; kwargs...)
    A = cache.A
    A = convert(AbstractMatrix, A)

    if cache.isfresh
        cacheval = @get_cacheval(cache, :CHOLMODFactorization)
        fact = cholesky(A; check = false)
        if !LinearAlgebra.issuccess(fact)
            ldlt!(fact, A; check = false)
        end
        cache.cacheval = fact
        cache.isfresh = false
    end

    cache.u .= @get_cacheval(cache, :CHOLMODFactorization) \ cache.b
    SciMLBase.build_linear_solution(alg, cache.u, nothing, cache)
end

## RFLUFactorization

"""
`RFLUFactorization()`

A fast pure Julia LU-factorization implementation
using RecursiveFactorization.jl. This is by far the fastest LU-factorization
implementation, usually outperforming OpenBLAS and MKL for smaller matrices
(<500x500), but currently optimized only for Base `Array` with `Float32` or `Float64`.
Additional optimization for complex matrices is in the works.
"""
struct RFLUFactorization{P, T} <: AbstractFactorization
    RFLUFactorization(::Val{P}, ::Val{T}) where {P, T} = new{P, T}()
end

function RFLUFactorization(; pivot = Val(true), thread = Val(true))
    RFLUFactorization(pivot, thread)
end
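
# Usage sketch (assumes the standard LinearSolve.jl interface): the `Val`
# flags select pivoting and threading at compile time:
#
#     using LinearSolve
#     A = rand(50, 50); b = rand(50)
#     sol = solve(LinearProblem(A, b), RFLUFactorization())
#     # single-threaded, unpivoted variant:
#     alg = RFLUFactorization(; pivot = Val(false), thread = Val(false))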

function init_cacheval(alg::RFLUFactorization, A, b, u, Pl, Pr, maxiters::Int,
        abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...))
    ArrayInterface.lu_instance(convert(AbstractMatrix, A)), ipiv
end

function init_cacheval(alg::RFLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr,
        maxiters::Int,
        abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    ipiv = Vector{LinearAlgebra.BlasInt}(undef, 0)
    PREALLOCATED_LU, ipiv
end

function init_cacheval(alg::RFLUFactorization,
        A::Union{AbstractSparseArray, AbstractSciMLOperator}, b, u, Pl, Pr,
        maxiters::Int,
        abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    nothing, nothing
end

function init_cacheval(alg::RFLUFactorization,
        A::Union{Diagonal, SymTridiagonal, Tridiagonal}, b, u, Pl, Pr,
        maxiters::Int,
        abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    nothing, nothing
end

function SciMLBase.solve!(cache::LinearCache, alg::RFLUFactorization{P, T};
        kwargs...) where {P, T}
    A = cache.A
    A = convert(AbstractMatrix, A)
    fact, ipiv = @get_cacheval(cache, :RFLUFactorization)
    if cache.isfresh
        if length(ipiv) != min(size(A)...)
            ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...))
        end
        fact = RecursiveFactorization.lu!(A, ipiv, Val(P), Val(T))
        cache.cacheval = (fact, ipiv)
        cache.isfresh = false
    end
    y = ldiv!(cache.u, @get_cacheval(cache, :RFLUFactorization)[1], cache.b)
    SciMLBase.build_linear_solution(alg, y, nothing, cache)
end

## NormalCholeskyFactorization

"""
`NormalCholeskyFactorization(; pivot = NoPivot())`

A fast factorization which uses a Cholesky factorization on A' * A. Can be much
faster than LU factorization, but is not as numerically stable and thus should only
be applied to well-conditioned matrices.

## Keyword Arguments

* pivot: Defaults to `NoPivot()`, but can be `RowMaximum()`.
"""
struct NormalCholeskyFactorization{P} <: AbstractFactorization
    pivot::P
end

function NormalCholeskyFactorization(; pivot = nothing)
    pivot === nothing && (pivot = NoPivot())
    NormalCholeskyFactorization(pivot)
end
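
# Usage sketch (assumes the standard LinearSolve.jl interface): the normal
# equations `A' * A * x = A' * b` make this natural for tall least-squares
# systems, at the cost of squaring the condition number:
#
#     using LinearSolve
#     A = rand(100, 10); b = rand(100)
#     sol = solve(LinearProblem(A, b), NormalCholeskyFactorization())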

default_alias_A(::NormalCholeskyFactorization, ::Any, ::Any) = true
default_alias_b(::NormalCholeskyFactorization, ::Any, ::Any) = true

const PREALLOCATED_NORMALCHOLESKY = ArrayInterface.cholesky_instance(rand(1, 1), NoPivot())

function init_cacheval(alg::NormalCholeskyFactorization,
        A::Union{AbstractSparseArray, GPUArraysCore.AnyGPUArray,
            Symmetric{<:Number, <:AbstractSparseArray}}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    ArrayInterface.cholesky_instance(convert(AbstractMatrix, A))
end

function init_cacheval(alg::NormalCholeskyFactorization, A::SMatrix, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    return cholesky(Symmetric((A)' * A))
end

# Profiling note: 71 samples (26%) were spent in this `init_cacheval` method,
# all of them (100% incl.) when called from macro expansion line 310, almost
# entirely on the `cholesky_instance` call below.
function init_cacheval(alg::NormalCholeskyFactorization, A, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    A_ = convert(AbstractMatrix, A)
    return ArrayInterface.cholesky_instance(Symmetric(A_' * A_), alg.pivot)
end

function init_cacheval(alg::NormalCholeskyFactorization,
        A::Union{Diagonal, AbstractSciMLOperator}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    nothing
end

function SciMLBase.solve!(cache::LinearCache, alg::NormalCholeskyFactorization; kwargs...)
    A = cache.A
    A = convert(AbstractMatrix, A)
    if cache.isfresh
        if A isa SparseMatrixCSC || A isa GPUArraysCore.AnyGPUArray || A isa SMatrix
            fact = cholesky(Symmetric((A)' * A); check = false)
        else
            fact = cholesky(Symmetric((A)' * A), alg.pivot; check = false)
        end
        cache.cacheval = fact
        cache.isfresh = false
    end
    if A isa SparseMatrixCSC
        cache.u .= @get_cacheval(cache, :NormalCholeskyFactorization) \ (A' * cache.b)
        y = cache.u
    elseif A isa StaticArray
        cache.u = @get_cacheval(cache, :NormalCholeskyFactorization) \ (A' * cache.b)
        y = cache.u
    else
        y = ldiv!(cache.u, @get_cacheval(cache, :NormalCholeskyFactorization), A' * cache.b)
    end
    SciMLBase.build_linear_solution(alg, y, nothing, cache)
end

## NormalBunchKaufmanFactorization

"""
`NormalBunchKaufmanFactorization(; rook = false)`

A fast factorization which uses a BunchKaufman factorization on A' * A. Can be much
faster than LU factorization, but is not as numerically stable and thus should only
be applied to well-conditioned matrices.

## Keyword Arguments

* rook: whether to perform rook pivoting. Defaults to false.
"""
struct NormalBunchKaufmanFactorization <: AbstractFactorization
    rook::Bool
end

function NormalBunchKaufmanFactorization(; rook = false)
    NormalBunchKaufmanFactorization(rook)
end

default_alias_A(::NormalBunchKaufmanFactorization, ::Any, ::Any) = true
default_alias_b(::NormalBunchKaufmanFactorization, ::Any, ::Any) = true

function init_cacheval(alg::NormalBunchKaufmanFactorization, A, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    ArrayInterface.bunchkaufman_instance(convert(AbstractMatrix, A))
end

function SciMLBase.solve!(cache::LinearCache, alg::NormalBunchKaufmanFactorization;
        kwargs...)
    A = cache.A
    A = convert(AbstractMatrix, A)
    if cache.isfresh
        fact = bunchkaufman(Symmetric((A)' * A), alg.rook)
        cache.cacheval = fact
        cache.isfresh = false
    end
    y = ldiv!(cache.u, @get_cacheval(cache, :NormalBunchKaufmanFactorization), A' * cache.b)
    SciMLBase.build_linear_solution(alg, y, nothing, cache)
end

## DiagonalFactorization

"""
`DiagonalFactorization()`

A special implementation only for solving `Diagonal` matrices fast.
"""
struct DiagonalFactorization <: AbstractFactorization end
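
# Usage sketch (assumes the standard LinearSolve.jl interface): the solve is
# just an elementwise division by the diagonal:
#
#     using LinearSolve, LinearAlgebra
#     A = Diagonal(rand(4) .+ 1)
#     sol = solve(LinearProblem(A, rand(4)), DiagonalFactorization())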

function init_cacheval(alg::DiagonalFactorization, A, b, u, Pl, Pr, maxiters::Int,
        abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    nothing
end

function SciMLBase.solve!(cache::LinearCache, alg::DiagonalFactorization;
        kwargs...)
    A = convert(AbstractMatrix, cache.A)
    if cache.u isa Vector && cache.b isa Vector
        @simd ivdep for i in eachindex(cache.u)
            cache.u[i] = A.diag[i] \ cache.b[i]
        end
    else
        cache.u .= A.diag .\ cache.b
    end
    SciMLBase.build_linear_solution(alg, cache.u, nothing, cache)
end

## FastLAPACKFactorizations

struct WorkspaceAndFactors{W, F}
    workspace::W
    factors::F
end

# There are no options like pivot here.
# But I'm not sure it makes sense as a GenericFactorization
# since it just uses `LAPACK.getrf!`.
"""
`FastLUFactorization()`

The FastLapackInterface.jl version of the LU factorization. Notably,
this version does not allow for choice of pivoting method.
"""
struct FastLUFactorization <: AbstractFactorization end
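
# Usage sketch (assumes the standard LinearSolve.jl caching interface): the
# preallocated LAPACK workspace pays off when factorizing repeatedly:
#
#     using LinearSolve
#     cache = init(LinearProblem(rand(100, 100), rand(100)), FastLUFactorization())
#     sol = solve!(cache)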

function init_cacheval(::FastLUFactorization, A, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    ws = LUWs(A)
    return WorkspaceAndFactors(ws, ArrayInterface.lu_instance(convert(AbstractMatrix, A)))
end

function SciMLBase.solve!(cache::LinearCache, alg::FastLUFactorization; kwargs...)
    A = cache.A
    A = convert(AbstractMatrix, A)
    ws_and_fact = @get_cacheval(cache, :FastLUFactorization)
    if cache.isfresh
        # we will fail here if A is a different *size* than in a previous version of the same cache.
        # it may instead be desirable to resize the workspace.
        @set! ws_and_fact.factors = LinearAlgebra.LU(LAPACK.getrf!(ws_and_fact.workspace,
            A)...)
        cache.cacheval = ws_and_fact
        cache.isfresh = false
    end
    y = ldiv!(cache.u, cache.cacheval.factors, cache.b)
    SciMLBase.build_linear_solution(alg, y, nothing, cache)
end

"""
`FastQRFactorization()`

The FastLapackInterface.jl version of the QR factorization.
"""
struct FastQRFactorization{P} <: AbstractFactorization
    pivot::P
    blocksize::Int
end

# is 36 or 16 better here? LinearAlgebra and FastLapackInterface use 36,
# but QRFactorization uses 16.
FastQRFactorization() = FastQRFactorization(NoPivot(), 36)

function init_cacheval(alg::FastQRFactorization{NoPivot}, A::AbstractMatrix, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    ws = QRWYWs(A; blocksize = alg.blocksize)
    return WorkspaceAndFactors(ws,
        ArrayInterface.qr_instance(convert(AbstractMatrix, A)))
end
function init_cacheval(::FastQRFactorization{ColumnNorm}, A::AbstractMatrix, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    ws = QRpWs(A)
    return WorkspaceAndFactors(ws,
        ArrayInterface.qr_instance(convert(AbstractMatrix, A)))
end

function init_cacheval(alg::FastQRFactorization, A, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
    return init_cacheval(alg, convert(AbstractMatrix, A), b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::OperatorAssumptions)
end

function SciMLBase.solve!(cache::LinearCache, alg::FastQRFactorization{P};
        kwargs...) where {P}
    A = cache.A
    A = convert(AbstractMatrix, A)
    ws_and_fact = @get_cacheval(cache, :FastQRFactorization)
    if cache.isfresh
        # we will fail here if A is a different *size* than in a previous version of the same cache.
        # it may instead be desirable to resize the workspace.
        if P === NoPivot
            @set! ws_and_fact.factors = LinearAlgebra.QRCompactWY(LAPACK.geqrt!(ws_and_fact.workspace,
                A)...)
        else
            @set! ws_and_fact.factors = LinearAlgebra.QRPivoted(LAPACK.geqp3!(ws_and_fact.workspace,
                A)...)
        end
        cache.cacheval = ws_and_fact
        cache.isfresh = false
    end
    y = ldiv!(cache.u, cache.cacheval.factors, cache.b)
    SciMLBase.build_linear_solution(alg, y, nothing, cache)
end

## SparspakFactorization is here since it's MIT licensed, not GPL

"""
`SparspakFactorization(; reuse_symbolic = true)`

This is the translation of the well-known sparse matrix software Sparspak
(Waterloo Sparse Matrix Package) for solving large sparse systems of linear
algebraic equations. Sparspak is composed of the subroutines from the book
"Computer Solution of Large Sparse Positive Definite Systems" by Alan George
and Joseph Liu. Originally written in Fortran 77, it was later rewritten in
Fortran 90; this is that software translated into Julia.

The Julia rewrite is released under the MIT license with express permission
from the authors of the Fortran package. The package uses multiple dispatch to
route around standard BLAS routines in the case of, e.g., arbitrary-precision
floating point numbers or ForwardDiff.Dual numbers. This, for example, allows
for Automatic Differentiation (AD) of a sparse-matrix solve.
"""
Base.@kwdef struct SparspakFactorization <: AbstractFactorization
    reuse_symbolic::Bool = true
end
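
# Usage sketch (assumes the standard LinearSolve.jl interface): because
# Sparspak is pure Julia, it works with number types BLAS can't handle, e.g.
# BigFloat:
#
#     using LinearSolve, SparseArrays, LinearAlgebra
#     Af = sprand(20, 20, 0.2) + 10I
#     A = SparseMatrixCSC{BigFloat, Int}(Af)   # arbitrary-precision entries
#     b = rand(BigFloat, 20)
#     sol = solve(LinearProblem(A, b), SparspakFactorization())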

const PREALLOCATED_SPARSEPAK = sparspaklu(SparseMatrixCSC(0, 0, [1], Int[], Float64[]),
    factorize = false)

function init_cacheval(alg::SparspakFactorization,
        A::Union{Matrix, Nothing, AbstractSciMLOperator}, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol,
        verbose::Bool, assumptions::OperatorAssumptions)
    nothing
end

function init_cacheval(::SparspakFactorization, A::SparseMatrixCSC{Float64, Int}, b, u, Pl,
        Pr, maxiters::Int, abstol, reltol,
        verbose::Bool, assumptions::OperatorAssumptions)
    PREALLOCATED_SPARSEPAK
end

function init_cacheval(::SparspakFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol,
        reltol,
        verbose::Bool, assumptions::OperatorAssumptions)
    A = convert(AbstractMatrix, A)
    if A isa SparseArrays.AbstractSparseArray
        return sparspaklu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A),
                nonzeros(A)),
            factorize = false)
    else
        return sparspaklu(SparseMatrixCSC(0, 0, [1], Int[], eltype(A)[]),
            factorize = false)
    end
end

function init_cacheval(::SparspakFactorization, ::StaticArray, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions)
    nothing
end

function SciMLBase.solve!(cache::LinearCache, alg::SparspakFactorization; kwargs...)
    A = cache.A
    if cache.isfresh
        if cache.cacheval !== nothing && alg.reuse_symbolic
            fact = sparspaklu!(@get_cacheval(cache, :SparspakFactorization),
                SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A),
                    nonzeros(A)))
        else
            fact = sparspaklu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A),
                nonzeros(A)))
        end
        cache.cacheval = fact
        cache.isfresh = false
    end
    y = ldiv!(cache.u, @get_cacheval(cache, :SparspakFactorization), cache.b)
    SciMLBase.build_linear_solution(alg, y, nothing, cache)
end

for alg in InteractiveUtils.subtypes(AbstractFactorization)
    @eval function init_cacheval(alg::$alg, A::MatrixOperator, b, u, Pl, Pr,
            maxiters::Int, abstol, reltol, verbose::Bool,
            assumptions::OperatorAssumptions)
        init_cacheval(alg, A.A, b, u, Pl, Pr,
            maxiters::Int, abstol, reltol, verbose::Bool,
            assumptions::OperatorAssumptions)
    end
end