Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Implement finite section based adaptive QL #125

Merged
merged 36 commits into from
May 8, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
36 commits
Select commit Hold shift + click to select a range
f5d023f
basic implementation of finite section based QL
TSGut Jan 26, 2023
b1f0405
import AbstractCachedMatrix
TSGut Jan 26, 2023
30bd77b
bug fixes + basic tests work
TSGut Jan 26, 2023
f3454ae
more tests and lowertriangular layout
TSGut Jan 27, 2023
a88b417
simplify loops somewhat
TSGut Jan 27, 2023
486de72
stop checking full finite sections + refactor
TSGut Jan 29, 2023
b261384
refactor before working on matching QL structure
TSGut Feb 7, 2023
6a43660
merge latest pr
TSGut Feb 7, 2023
9b2ba90
change underlying structs
TSGut Feb 8, 2023
3074dca
work towards expanding tau and factors simultaneously
TSGut Feb 8, 2023
d412e6a
Merge pull request #2 from JuliaLinearAlgebra/master
TSGut Apr 4, 2023
246afb8
Update infql.jl
TSGut Apr 4, 2023
802827b
explicitly import some lazyarrays functions + type declarations
TSGut Apr 5, 2023
a1c6465
Merge remote-tracking branch 'origin/master'
TSGut Apr 5, 2023
9458aea
big style overhaul
TSGut Apr 20, 2023
3608e71
Update src/infql.jl
TSGut Apr 21, 2023
2d44c60
tidying, test fix, todo comment
TSGut Apr 21, 2023
d9ae9e3
move getindex from here to LazyArrays
TSGut Apr 22, 2023
9148dcc
Update Project.toml
TSGut May 4, 2023
116bc55
change Q implementation to be lazy
TSGut May 5, 2023
00a8a00
Merge remote-tracking branch 'origin/master'
TSGut May 5, 2023
d3a0bcf
import the default getQ
TSGut May 6, 2023
8e5e420
bug fix and tidy lazy implementation
TSGut May 6, 2023
e83b44b
fix adjoint
TSGut May 6, 2023
f77c12d
reinstate some changes
TSGut May 6, 2023
8732cb8
add tests, remove a function
TSGut May 6, 2023
e46aa07
add a test for complex entries
TSGut May 6, 2023
753d6ad
add test for ldiv behavior of Q
TSGut May 6, 2023
c6cbf07
LazyVector -> LayoutVector
dlfivefifty May 6, 2023
677be50
Merge branch 'master' of https://github.com/TSGut/InfiniteLinearAlgeb…
dlfivefifty May 6, 2023
ea4b1f4
nzzeros -> last(colsupport
dlfivefifty May 6, 2023
1ee6014
restore non-tridiagonal functionality
TSGut May 6, 2023
3344bc0
Update test_infql.jl
dlfivefifty May 6, 2023
d57a574
Add matrix tests, remove unnecessary * overrides
dlfivefifty May 6, 2023
0d1a275
lazy multiplication by Q
TSGut May 7, 2023
be7b705
Update Project.toml
dlfivefifty May 8, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions Project.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
name = "InfiniteLinearAlgebra"
uuid = "cde9dba0-b1de-11e9-2c62-0bab9446c55c"
version = "0.6.17"
version = "0.6.18"

[deps]
ArrayLayouts = "4c555306-a7a7-4459-81d9-ec55ddd5c99a"
Expand All @@ -25,7 +25,7 @@ BlockBandedMatrices = "0.11.5, 0.12"
DSP = "0.7"
FillArrays = "0.13, 1"
InfiniteArrays = "0.12"
LazyArrays = "0.22, 1"
LazyArrays = "1.1.1"
LazyBandedMatrices = "0.8.7"
MatrixFactorizations = "0.9.6, 1"
SemiseparableMatrices = "0.3"
Expand Down
8 changes: 4 additions & 4 deletions src/InfiniteLinearAlgebra.jl
Original file line number Diff line number Diff line change
Expand Up @@ -12,17 +12,17 @@ import Base.Broadcast: BroadcastStyle, Broadcasted, broadcasted
import ArrayLayouts: colsupport, rowsupport, triangularlayout, MatLdivVec, triangulardata, TriangularLayout, TridiagonalLayout,
sublayout, _qr, __qr, MatLmulVec, MatLmulMat, AbstractQLayout, materialize!, diagonaldata, subdiagonaldata, supdiagonaldata,
_bidiag_forwardsub!, mulreduce, RangeCumsum, _factorize, transposelayout, ldiv!, lmul!, mul, CNoPivot
import BandedMatrices: BandedMatrix, _BandedMatrix, AbstractBandedMatrix, bandeddata, bandwidths, BandedColumns, bandedcolumns,
import BandedMatrices: BandedMatrix, _BandedMatrix, AbstractBandedMatrix, bandeddata, bandwidths, BandedColumns, bandedcolumns, BandedLayout,
_default_banded_broadcast, banded_similar
import FillArrays: AbstractFill, getindex_value, axes_print_matrix_row
import InfiniteArrays: OneToInf, InfUnitRange, Infinity, PosInfinity, InfiniteCardinal, InfStepRange, AbstractInfUnitRange, InfAxes, InfRanges
import LinearAlgebra: matprod, qr, AbstractTriangular, AbstractQ, adjoint, transpose, AdjOrTrans
import LazyArrays: applybroadcaststyle, CachedArray, CachedMatrix, CachedVector, DenseColumnMajor, FillLayout, ApplyMatrix, check_mul_axes, LazyArrayStyle,
resizedata!, MemoryLayout,
factorize, sub_materialize, LazyLayout, LazyArrayStyle, layout_getindex,
applylayout, ApplyLayout, PaddedLayout, CachedLayout, cacheddata, zero!, MulAddStyle,
LazyArray, LazyMatrix, LazyVector, paddeddata, arguments
import MatrixFactorizations: ul, ul!, _ul, ql, ql!, _ql, QLPackedQ, getL, getR, getU, reflector!, reflectorApply!, QL, QR, QRPackedQ,
applylayout, ApplyLayout, PaddedLayout, CachedLayout, AbstractCachedVector, AbstractCachedMatrix, cacheddata, zero!, MulAddStyle, ApplyArray,
LazyArray, LazyMatrix, LazyVector, paddeddata, arguments, resizedata!
import MatrixFactorizations: ul, ul!, _ul, ql, ql!, _ql, QLPackedQ, getL, getR, getQ, getU, reflector!, reflectorApply!, QL, QR, QRPackedQ,
QRPackedQLayout, AdjQRPackedQLayout, QLPackedQLayout, AdjQLPackedQLayout, LayoutQ

import BlockArrays: AbstractBlockVecOrMat, sizes_from_blocks, _length, BlockedUnitRange, blockcolsupport, BlockLayout, AbstractBlockLayout, BlockSlice
Expand Down
4 changes: 2 additions & 2 deletions src/banded/hessenbergq.jl
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,7 @@ function materialize!(L::MatLmulVec{<:HessenbergQLayout{'L'}})
Q, x = L.A,L.B
T = eltype(Q)
t = Array{T}(undef, 2)
nz = nzzeros(x,1)
nz = last(colsupport(x))
for n = 1:length(Q.q)
v = view(x, n:n+1)
mul!(t, Q.q[n], v)
Expand All @@ -126,7 +126,7 @@ function materialize!(L::MatLmulVec{<:HessenbergQLayout{'U'}})
Q, x = L.A,L.B
T = eltype(Q)
t = Array{T}(undef, 2)
for n = min(length(Q.q),nzzeros(x,1)):-1:1
for n = min(length(Q.q),last(colsupport(x))):-1:1
v = view(x, n:n+1)
mul!(t, Q.q[n], v)
v .= t
Expand Down
199 changes: 170 additions & 29 deletions src/infql.jl
Original file line number Diff line number Diff line change
Expand Up @@ -92,23 +92,9 @@ function ql_hessenberg!(B::InfBandedMatrix{TT}; kwds...) where TT
QLHessenberg(_BandedMatrix(H, ℵ₀, l, 1), Vcat( LowerHessenbergQ(F.Q).q, F∞.q))
end

getindex(Q::QLPackedQ{T,<:InfBandedMatrix{T}}, i::Int, j::Int) where T =
(Q'*[Zeros{T}(i-1); one(T); Zeros{T}(∞)])[j]'
getindex(Q::QLPackedQ{<:Any,<:InfBandedMatrix}, I::AbstractVector{Int}, J::AbstractVector{Int}) =
[Q[i,j] for i in I, j in J]

getL(Q::QL, ::NTuple{2,InfiniteCardinal{0}}) = LowerTriangular(Q.factors)
getL(Q::QLHessenberg, ::NTuple{2,InfiniteCardinal{0}}) = LowerTriangular(Q.factors)

# number of structural non-zeros in axis k
nzzeros(A::AbstractArray, k) = size(A,k)
nzzeros(::Zeros, k) = 0
nzzeros(B::Vcat, k) = sum(size.(B.args[1:end-1],k))
nzzeros(B::CachedArray, k) = max(B.datasize[k], nzzeros(B.array,k))
function nzzeros(B::AbstractMatrix, k)
l,u = bandwidths(B)
k == 1 ? size(B,2) + l : size(B,1) + u
end

function materialize!(M::Lmul{<:QLPackedQLayout{<:BandedColumns},<:PaddedLayout})
A,B = M.A,M.B
Expand All @@ -123,7 +109,7 @@ function materialize!(M::Lmul{<:QLPackedQLayout{<:BandedColumns},<:PaddedLayout}
D = Afactors.data
for k = 1:∞
ν = k
allzero = k > nzzeros(B,1) ? true : false
allzero = k > last(colsupport(B)) ? true : false
for j = 1:nB
vBj = B[k,j]
for i = max(1,ν-u):k-1
Expand Down Expand Up @@ -157,7 +143,7 @@ function materialize!(M::Lmul{<:AdjQLPackedQLayout{<:BandedColumns},<:PaddedLayo
l,u = bandwidths(Afactors)
D = Afactors.data
@inbounds begin
for k = nzzeros(B,1)+u:-1:1
for k = last(colsupport(B))+u:-1:1
ν = k
for j = 1:nB
vBj = B[k,j]
Expand All @@ -175,15 +161,6 @@ function materialize!(M::Lmul{<:AdjQLPackedQLayout{<:BandedColumns},<:PaddedLayo
B
end

function _lmul_cache(A::Union{AbstractMatrix{T},AbstractQ{T}}, x::AbstractVector{S}) where {T,S}
TS = promote_op(matprod, T, S)
lmul!(A, cache(convert(AbstractVector{TS},x)))
end

(*)(A::QLPackedQ{T,<:InfBandedMatrix}, x::AbstractVector) where {T} = _lmul_cache(A, x)
(*)(A::AdjointQtype{T,<:QLPackedQ{T,<:InfBandedMatrix}}, x::AbstractVector) where {T} = _lmul_cache(A, x)
(*)(A::QLPackedQ{T,<:InfBandedMatrix}, x::LazyVector) where {T} = _lmul_cache(A, x)
(*)(A::AdjointQtype{T,<:QLPackedQ{T,<:InfBandedMatrix}}, x::LazyVector) where {T} = _lmul_cache(A, x)


function blocktailiterate(c,a,b, d=c, e=a)
Expand Down Expand Up @@ -246,7 +223,7 @@ function lmul!(adjA::AdjointQtype{<:Any,<:QLPackedQ{<:Any,<:InfBlockBandedMatrix
l = 2l+1
u = 2u+1
@inbounds begin
for k = nzzeros(B,1)+u:-1:1
for k = last(colsupport(B))+u:-1:1
ν = k
for j = 1:nB
vBj = B[k,j]
Expand Down Expand Up @@ -290,7 +267,7 @@ function materialize!(M::MatLdivVec{<:TriangularLayout{'L','N',BandedColumns{Per
throw(DimensionMismatch("second dimension of left hand side A, $n, and length of right hand side b, $(length(b)), must be equal"))
end
data = triangulardata(A)
nz = nzzeros(b,1)
nz = last(colsupport(b))
@inbounds for j in 1:n
iszero(data[j,j]) && throw(SingularException(j))
bj = b[j] = data[j,j] \ b[j]
Expand Down Expand Up @@ -333,8 +310,6 @@ function _ql(::SymTridiagonalLayout, ::NTuple{2,OneToInf{Int}}, A, args...; kwds
ql(_BandedMatrix(Hcat(dat, [ev∞,d∞,ev∞] * Ones{T}(1,∞)), ℵ₀, 1, 1), args...; kwds...)
end



# TODO: This should be redesigned as ql(BandedMatrix(A))
# But we need to support dispatch on axes
function _ql(::TridiagonalLayout, ::NTuple{2,OneToInf{Int}}, A, args...; kwds...)
Expand Down Expand Up @@ -378,3 +353,169 @@ function LazyBandedMatrices._SymTridiagonal(::Tuple{TriangularLayout{'L', 'N', P
ev = [A[k,k+1] for k=1:m-1]
SymTridiagonal([dv; Fill(dv[end],∞)], [ev; Fill(ev[end],∞)])
end

###
# Experimental adaptive finite section QL
###
# Lazily-expanded cache of the τ (reflector coefficient) vector of the adaptive
# finite-section QL decomposition. `data` holds the first `datasize` entries;
# `M` is the matrix being factorized, and entries are recomputed on demand by
# `cache_filldata!` to tolerance `tol`.
# NOTE(review): `M`, `datasize` and `tol` are abstractly typed fields, which
# blocks specialization — consider parametrizing in a future breaking release.
mutable struct AdaptiveQLTau{T} <: AbstractCachedVector{T}
    data::Vector{T}
    M::AbstractMatrix{T}
    datasize::Integer
    tol::Real
    AdaptiveQLTau{T}(D, M, N::Integer, tol) where T = new{T}(D, M, N, tol)
end
# Lazily-expanded cache of the packed `factors` matrix of the adaptive
# finite-section QL decomposition. `data` is the cached leading finite section
# (size `datasize`); `M` is the matrix being factorized and `tol` the
# convergence tolerance used when the cache is grown by `cache_filldata!`.
# NOTE(review): `M`, and `tol` are abstractly typed fields, which blocks
# specialization — consider parametrizing in a future breaking release.
mutable struct AdaptiveQLFactors{T} <: AbstractCachedMatrix{T}
    data::BandedMatrix{T}
    M::AbstractMatrix{T}
    datasize::Tuple{Int, Int}
    tol::Real
    AdaptiveQLFactors{T}(D, M, N::Tuple{Int, Int}, tol) where T = new{T}(D, M, N, tol)
end

# Both caches represent conceptually infinite arrays: report the infinite
# cardinal ℵ₀ for each axis, matching the conventions of InfiniteArrays.jl.
size(::AdaptiveQLFactors) = (ℵ₀, ℵ₀)
size(::AdaptiveQLTau) = (ℵ₀, )

# adaptive QL accepts optional tolerance
# Adaptive finite-section QL of an infinite banded matrix.
# The optional `tol` controls when the growing finite sections are deemed
# converged; it defaults to machine epsilon of the (real) element type.
# Returns a `QL` whose factors and τ are lazily-expanding caches.
function ql(A::InfBandedMatrix{T}, tol = eps(float(real(T)))) where T
    fac, taus = initialadaptiveQLblock(A, tol)
    cached_factors = AdaptiveQLFactors{T}(fac, A, size(fac), tol)
    cached_tau = AdaptiveQLTau{T}(taus, A, length(taus), tol)
    return QL(cached_factors, cached_tau)
end

# Computes the initial data for the finite section based QL decomposition
# Computes the initial data for the finite section based QL decomposition.
# Doubles the finite section size until the leading block of L is converged
# to `tol` (in the 2-norm), then returns the leading 50×50 packed factors and
# the first 50 τ coefficients.
function initialadaptiveQLblock(A::AbstractMatrix{T}, tol) where T
    maxN = 10000 # Prevent runaway loop
    j = 50 # We initialize with a 50 × 50 block that is adaptively expanded
    Lerr = one(real(T))
    N = j
    # rows/cols before `checkinds` cannot affect the compared block (bandedness)
    checkinds = max(1,j-bandwidth(A,1)-bandwidth(A,2))
    @inbounds Ls = ql(A[checkinds:N,checkinds:N]).L[2:j-checkinds+1,2:j-checkinds+1]
    @inbounds while Lerr > tol
        # compute QL for small finite section and large finite section
        Ll = ql(A[checkinds:2N,checkinds:2N]).L[2:j-checkinds+1,2:j-checkinds+1]
        # compare bottom right sections and stop if desired level of convergence achieved
        Lerr = norm(Ll-Ls,2)
        # BUG FIX: the guard was `N == maxN`, which never fires because N doubles
        # from 50 (50·2^k ≠ 10000). Use ≥, and only abort if not yet converged.
        if Lerr > tol && N ≥ maxN
            error("Reached max. iterations in adaptive QL without convergence to desired tolerance.")
        end
        Ls = Ll
        N = 2*N
    end
    F = ql(A[1:(N÷2),1:(N÷2)])
    return (F.factors[1:50,1:50], F.τ[1:50])
end

# Resize and filling functions for cached implementation
# Grow the cached factors to cover at least index `maximum(nm)`.
# Copies the existing cache into a larger buffer, then delegates to
# `cache_filldata!` to compute the newly exposed region. Returns `K`.
function resizedata!(K::AdaptiveQLFactors, nm...)
    target = maximum(nm)
    cached = K.datasize[1]
    if target > cached
        previous = copy(K.data)
        K.data = similar(K.data, target, target)
        K.data[axes(previous)...] = previous
        cache_filldata!(K, cached:target)
        K.datasize = size(K.data)
    end
    return K
end

# Grow the cached τ vector to cover at least index `maximum(nm)`.
# Resizes the backing vector in place and fills the new tail via
# `cache_filldata!`. Returns `K`.
function resizedata!(K::AdaptiveQLTau, nm...)
    target = maximum(nm)
    cached = K.datasize
    if target > cached
        resize!(K.data, target)
        cache_filldata!(K, cached:target)
        K.datasize = size(K.data,1)
    end
    return K
end

# Fill the factors cache up to index `maximum(inds)` by recomputing the QL of
# adaptively doubled finite sections of `A.M` until the leading block of L is
# converged to `A.tol`. Overwrites `A.data` with the converged leading block.
function cache_filldata!(A::AdaptiveQLFactors{T}, inds::UnitRange{Int}) where T
    j = maximum(inds)
    maxN = 1000*j # Prevent runaway loop
    Lerr = one(real(T))
    N = j
    # rows/cols before `checkinds` cannot affect the compared block (bandedness)
    checkinds = max(1,j-bandwidth(A.M,1)-bandwidth(A.M,2))
    @inbounds Ls = ql(A.M[checkinds:N,checkinds:N]).L[2:j-checkinds+1,2:j-checkinds+1]
    @inbounds while Lerr > A.tol
        # compute QL for small finite section and large finite section
        Ll = ql(A.M[checkinds:2N,checkinds:2N]).L[2:j-checkinds+1,2:j-checkinds+1]
        # compare bottom right sections and stop if desired level of convergence achieved
        Lerr = norm(Ll-Ls,2)
        # BUG FIX: the guard was `N == maxN`, which never fires because N doubles
        # from j (j·2^k ≠ 1000j since 2^k ≠ 1000). Use ≥, and only abort if not
        # yet converged.
        if Lerr > A.tol && N ≥ maxN
            error("Reached max. iterations in adaptive QL without convergence to desired tolerance.")
        end
        Ls = Ll
        N = 2*N
    end
    A.data = ql(A.M[1:(N÷2),1:(N÷2)]).factors[1:j,1:j]
end

# Fill the τ cache up to index `maximum(inds)` using the same adaptive
# finite-section doubling as the factors cache. Overwrites `A.data` with the
# first `j` τ coefficients of the converged finite-section QL.
function cache_filldata!(A::AdaptiveQLTau{T}, inds::UnitRange{Int}) where T
    j = maximum(inds)
    maxN = 1000*j # Prevent runaway loop
    Lerr = one(real(T))
    N = j
    # rows/cols before `checkinds` cannot affect the compared block (bandedness)
    checkinds = max(1,j-bandwidth(A.M,1)-bandwidth(A.M,2))
    @inbounds Ls = ql(A.M[checkinds:N,checkinds:N]).L[2:j-checkinds+1,2:j-checkinds+1]
    @inbounds while Lerr > A.tol
        # compute QL for small finite section and large finite section
        Ll = ql(A.M[checkinds:2N,checkinds:2N]).L[2:j-checkinds+1,2:j-checkinds+1]
        # compare bottom right sections and stop if desired level of convergence achieved
        Lerr = norm(Ll-Ls,2)
        # BUG FIX: the guard was `N == maxN`, which never fires because N doubles
        # from j (j·2^k ≠ 1000j since 2^k ≠ 1000). Use ≥, and only abort if not
        # yet converged.
        if Lerr > A.tol && N ≥ maxN
            error("Reached max. iterations in adaptive QL without convergence to desired tolerance.")
        end
        Ls = Ll
        N = 2*N
    end
    A.data = ql(A.M[1:(N÷2),1:(N÷2)]).τ[1:j]
end

# TODO: adaptively build L*b using caching and forward-substitution
# For now, multiplication by the adaptive lower-triangular factor is deferred
# as a lazy applied product.
*(L::LowerTriangular{T, AdaptiveQLFactors{T}}, b::LayoutVector) where T = ApplyArray(*, L, b)

# The adaptive factors behave as a lazy banded matrix; bandwidths are taken
# from the currently cached finite section.
MemoryLayout(::AdaptiveQLFactors) = LazyBandedLayout()
bandwidths(F::AdaptiveQLFactors) = bandwidths(F.data)

# Q = \\prod_{i=1}^{\\min(m,n)} (I - \\tau_i v_i v_i^T)
# Scalar entries of Q are recovered by applying Q' to a coordinate vector:
# Q[i,j] == (Q'e_i)[j]'. The Zeros{T}(∞) tail pads e_i to infinite length.
# BUG FIX: review-widget text from the scraped diff was interleaved with these
# definitions, breaking the syntax; the four methods are restored verbatim.
getindex(Q::QLPackedQ{T,<:AdaptiveQLFactors{T}}, i::Int, j::Int) where T =
    (Q'*[Zeros{T}(i-1); one(T); Zeros{T}(∞)])[j]'
# Sub-block indexing falls back to entrywise scalar indexing.
getindex(Q::QLPackedQ{<:Any,<:AdaptiveQLFactors}, I::AbstractVector{Int}, J::AbstractVector{Int}) =
    [Q[i,j] for i in I, j in J]
getindex(Q::QLPackedQ{<:Any,<:AdaptiveQLFactors}, I::Int, J::UnitRange{Int}) =
    [Q[i,j] for i in I, j in J]
getindex(Q::QLPackedQ{<:Any,<:AdaptiveQLFactors}, I::UnitRange{Int}, J::Int) =
    [Q[i,j] for i in I, j in J]

materialize!(M::Lmul{<:QLPackedQLayout{<:LazyLayout},<:PaddedLayout}) = ApplyArray(*,M.A,M.B)

# In-place application of Q' (adjoint of the packed QL Q) to padded data B.
# Applies the Householder reflectors stored in A.factors, sweeping downward
# from beyond the last structurally nonzero row of B; mutates and returns B.
function materialize!(M::Lmul{<:AdjQLPackedQLayout{<:LazyLayout},<:PaddedLayout})
    adjA,B = M.A,M.B
    A = parent(adjA)
    mA, nA = size(A.factors)
    mB, nB = size(B,1), size(B,2)
    if mA != mB
        throw(DimensionMismatch("matrix A has dimensions ($mA,$nA) but B has dimensions ($mB, $nB)"))
    end
    Afactors = A.factors
    l,u = bandwidths(Afactors)
    # widen the bands to cover fill-in produced by the reflector application
    l = 2l+1
    u = 2u+1
    @inbounds begin
        for k = last(colsupport(B))+u:-1:1
            for j = 1:nB
                # vBj accumulates v'·B[:,j] for the k-th reflector v
                vBj = B[k,j]
                for i = max(1,k-u):k-1
                    vBj += conj(Afactors[i,k])*B[i,j]
                end
                vBj = conj(A.τ[k])*vBj
                # rank-one update B[:,j] -= τ̄ (v'·B[:,j]) v
                B[k,j] -= vBj
                for i = max(1,k-u):k-1
                    B[i,j] -= Afactors[i,k]*vBj
                end
            end
        end
    end
    B
end
Loading