fix and then specialize sparse broadcast!(f, C, A), strengthen tests #19986

Merged · 4 commits · Jan 21, 2017
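Prior to this change, sparse broadcast!(f, C, A) with a single sparse input simply forwarded to map!(f, C, A), which is only correct when C and A share a shape; the diff below removes that fallback and adds dedicated _broadcast_[not]zeropres! kernels that handle singleton-dimension expansion. A minimal usage sketch of the intended behavior (sizes and density are illustrative; assumes a build containing this change):

    A = sprand(4, 1, 0.5)      # sparse column; its singleton second dimension expands
    C = spzeros(4, 3)          # destination of a different but compatible shape
    broadcast!(sin, C, A)      # in-place sparse broadcast; C remains sparse

    fC = zeros(4, 3)           # dense reference computation
    broadcast!(sin, fC, Array(A))
    @assert Array(C) == fC

This mirrors the pattern the new tests below use: compute the same broadcast densely and compare.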
105 changes: 93 additions & 12 deletions base/sparse/higherorderfns.jl
@@ -19,11 +19,12 @@ using ..SparseArrays: SparseVector, SparseMatrixCSC, AbstractSparseArray, indtyp
# (4) Define _map_[not]zeropres! specialized for a single (input) sparse vector/matrix.
# (5) Define _map_[not]zeropres! specialized for a pair of (input) sparse vectors/matrices.
# (6) Define general _map_[not]zeropres! capable of handling >2 (input) sparse vectors/matrices.
# (7) Define _broadcast_[not]zeropres! specialized for a pair of (input) sparse vectors/matrices.
# (8) Define general _broadcast_[not]zeropres! capable of handling >2 (input) sparse vectors/matrices.
# (9) Define (broadcast[!]) methods handling combinations of broadcast scalars and sparse vectors/matrices.
# (10) Define (broadcast[!]) methods handling combinations of scalars, sparse vectors/matrices, and structured matrices.
# (11) Define (map[!]) methods handling combinations of sparse and structured matrices.
# (7) Define _broadcast_[not]zeropres! specialized for a single (input) sparse vector/matrix.
# (8) Define _broadcast_[not]zeropres! specialized for a pair of (input) sparse vectors/matrices.
# (9) Define general _broadcast_[not]zeropres! capable of handling >2 (input) sparse vectors/matrices.
# (10) Define (broadcast[!]) methods handling combinations of broadcast scalars and sparse vectors/matrices.
# (11) Define (broadcast[!]) methods handling combinations of scalars, sparse vectors/matrices, and structured matrices.
# (12) Define (map[!]) methods handling combinations of sparse and structured matrices.


# (1) The definitions below provide a common interface to sparse vectors and matrices
@@ -102,7 +103,6 @@ function broadcast!{Tf}(f::Tf, C::SparseVecOrMat)
end
return C
end
broadcast!{Tf}(f::Tf, C::SparseVecOrMat, A::SparseVecOrMat) = map!(f, C, A)
function broadcast!{Tf,N}(f::Tf, C::SparseVecOrMat, A::SparseVecOrMat, Bs::Vararg{SparseVecOrMat,N})
_aresameshape(C, A, Bs...) && return _noshapecheck_map!(f, C, A, Bs...)
Base.Broadcast.check_broadcast_indices(indices(C), A, Bs...)
@@ -150,7 +150,8 @@ _maxnnzfrom(shape::NTuple{2}, A::SparseVector) = nnz(A) * div(shape[1], A.n) * s
_maxnnzfrom(shape::NTuple{2}, A::SparseMatrixCSC) = nnz(A) * div(shape[1], A.m) * div(shape[2], A.n)
@inline _maxnnzfrom_each(shape, ::Tuple{}) = ()
@inline _maxnnzfrom_each(shape, As) = (_maxnnzfrom(shape, first(As)), _maxnnzfrom_each(shape, tail(As))...)
@inline _unchecked_maxnnzbcres(shape, As) = min(_densennz(shape), sum(_maxnnzfrom_each(shape, As)))
@inline _unchecked_maxnnzbcres(shape, As::Tuple) = min(_densennz(shape), sum(_maxnnzfrom_each(shape, As)))
@inline _unchecked_maxnnzbcres(shape, As...) = _unchecked_maxnnzbcres(shape, As)
@inline _checked_maxnnzbcres(shape::NTuple{1}, As...) = shape[1] != 0 ? _unchecked_maxnnzbcres(shape, As) : 0
@inline _checked_maxnnzbcres(shape::NTuple{2}, As...) = shape[1] != 0 && shape[2] != 0 ? _unchecked_maxnnzbcres(shape, As) : 0
@inline function _allocres(shape::NTuple{1}, indextype, entrytype, maxnnz)
@@ -425,7 +426,87 @@ end
end


# (7) _broadcast_zeropres!/_broadcast_notzeropres! specialized for a pair of (input) sparse vectors/matrices
# (7) _broadcast_zeropres!/_broadcast_notzeropres! specialized for a single (input) sparse vector/matrix
function _broadcast_zeropres!{Tf}(f::Tf, C::SparseVecOrMat, A::SparseVecOrMat)
isempty(C) && return _finishempty!(C)
spaceC::Int = min(length(storedinds(C)), length(storedvals(C)))
# C and A cannot have the same shape, as we directed that case to map in broadcast's
# entry point; here we need to efficiently handle only heterogeneous C-A combinations where
# one or both of C and A has at least one singleton dimension.
#
# We first divide the cases into two groups: those in which the input argument does not
# expand vertically, and those in which the input argument expands vertically.
#
# Cases without vertical expansion
Ck = 1
if numrows(A) == numrows(C)
@inbounds for j in columns(C)
setcolptr!(C, j, Ck)
bccolrangejA = numcols(A) == 1 ? colrange(A, 1) : colrange(A, j)
for Ak in bccolrangejA
Cx = f(storedvals(A)[Ak])
if !_iszero(Cx)
Ck > spaceC && (spaceC = expandstorage!(C, _unchecked_maxnnzbcres(size(C), A)))
storedinds(C)[Ck] = storedinds(A)[Ak]
storedvals(C)[Ck] = Cx
Ck += 1
end
end
end
# Cases with vertical expansion
else # numrows(A) != numrows(C) (=> numrows(A) == 1)
@inbounds for j in columns(C)
setcolptr!(C, j, Ck)
Ak, stopAk = numcols(A) == 1 ? (colstartind(A, 1), colboundind(A, 1)) : (colstartind(A, j), colboundind(A, j))
Ax = Ak < stopAk ? storedvals(A)[Ak] : zero(eltype(A))
fofAx = f(Ax)
# if fofAx is zero, then either A's jth column is empty, or A's jth column
# contains a nonzero value Ax for which f(Ax) is nonetheless zero, so we need to store
# nothing in C's jth column. if, to the contrary, fofAx is nonzero, then we must
# densely populate C's jth column with fofAx.
if !_iszero(fofAx)
for Ci::indtype(C) in 1:numrows(C)
Ck > spaceC && (spaceC = expandstorage!(C, _unchecked_maxnnzbcres(size(C), A)))
storedinds(C)[Ck] = Ci
storedvals(C)[Ck] = fofAx
Ck += 1
end
end
end
end
@inbounds setcolptr!(C, numcols(C) + 1, Ck)
trimstorage!(C, Ck - 1)
return C
end
function _broadcast_notzeropres!{Tf}(f::Tf, fillvalue, C::SparseVecOrMat, A::SparseVecOrMat)
# For information on this code, see comments in similar code in _broadcast_zeropres! above
# Build dense matrix structure in C, expanding storage if necessary
_densestructure!(C)
# Populate values
fill!(storedvals(C), fillvalue)
# Cases without vertical expansion
if numrows(A) == numrows(C)
@inbounds for (j, jo) in zip(columns(C), _densecoloffsets(C))
bccolrangejA = numcols(A) == 1 ? colrange(A, 1) : colrange(A, j)
for Ak in bccolrangejA
Cx, Ci = f(storedvals(A)[Ak]), storedinds(A)[Ak]
Cx != fillvalue && (storedvals(C)[jo + Ci] = Cx)
end
end
# Cases with vertical expansion
else # numrows(A) != numrows(C) (=> numrows(A) == 1)
@inbounds for (j, jo) in zip(columns(C), _densecoloffsets(C))
Ak, stopAk = numcols(A) == 1 ? (colstartind(A, 1), colboundind(A, 1)) : (colstartind(A, j), colboundind(A, j))
Ax = Ak < stopAk ? storedvals(A)[Ak] : zero(eltype(A))
fofAx = f(Ax)
fofAx != fillvalue && (storedvals(C)[(jo + 1):(jo + numrows(C))] = fofAx)
end
end
return C
end
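The two kernels above split the single-input case by whether A expands vertically, and by whether f maps zero to zero (the zeropres kernel) or not (the notzeropres kernel, which densifies C's structure and pre-fills it with fillvalue). A small illustrative sketch of the observable behavior (values are arbitrary; assumes a build containing this change):

    A = sparse([1], [2], [0.5], 1, 3)   # 1x3 sparse row with a single stored entry
    C = spzeros(4, 3)

    broadcast!(sin, C, A)    # zero-preserving: only column 2 receives stored entries
    @assert nnz(C) == 4 && C[1, 2] == sin(0.5)

    broadcast!(cos, C, A)    # not zero-preserving: C densifies, filled with cos(0.0)
    @assert nnz(C) == 12 && C[1, 1] == 1.0 && C[1, 2] == cos(0.5)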


# (8) _broadcast_zeropres!/_broadcast_notzeropres! specialized for a pair of (input) sparse vectors/matrices
function _broadcast_zeropres!{Tf}(f::Tf, C::SparseVecOrMat, A::SparseVecOrMat, B::SparseVecOrMat)
isempty(C) && return _finishempty!(C)
spaceC::Int = min(length(storedinds(C)), length(storedvals(C)))
@@ -668,7 +749,7 @@ _finishempty!(C::SparseMatrixCSC) = (fill!(C.colptr, 1); C)
_finishempty!(C::SparseMatrixCSC) = (fill!(C.colptr, 1); C)


# (8) _broadcast_zeropres!/_broadcast_notzeropres! for more than two (input) sparse vectors/matrices
# (9) _broadcast_zeropres!/_broadcast_notzeropres! for more than two (input) sparse vectors/matrices
function _broadcast_zeropres!{Tf,N}(f::Tf, C::SparseVecOrMat, As::Vararg{SparseVecOrMat,N})
isempty(C) && return _finishempty!(C)
spaceC::Int = min(length(storedinds(C)), length(storedvals(C)))
@@ -854,7 +935,7 @@ end
end


# (9) broadcast[!] over combinations of broadcast scalars and sparse vectors/matrices
# (10) broadcast[!] over combinations of broadcast scalars and sparse vectors/matrices

# broadcast shape promotion for combinations of sparse arrays and other types
broadcast_indices(::Type{AbstractSparseArray}, A) = indices(A)
@@ -905,7 +986,7 @@ broadcast{Tf,T}(f::Tf, ::Type{T}, A::SparseMatrixCSC) = broadcast(y -> f(T, y),
broadcast{Tf,T}(f::Tf, A::SparseMatrixCSC, ::Type{T}) = broadcast(x -> f(x, T), A)


# (10) broadcast[!] over combinations of scalars, sparse vectors/matrices, and structured matrices
# (11) broadcast[!] over combinations of scalars, sparse vectors/matrices, and structured matrices

# structured array container type promotion
immutable StructuredArray end
@@ -943,7 +1024,7 @@ promote_containertype(::Type{Tuple}, ::Type{StructuredArray}) = Array
@inline _sparsifystructured(x) = x


# (11) map[!] over combinations of sparse and structured matrices
# (12) map[!] over combinations of sparse and structured matrices
StructuredMatrix = Union{Diagonal,Bidiagonal,Tridiagonal,SymTridiagonal}
SparseOrStructuredMatrix = Union{SparseMatrixCSC,StructuredMatrix}
map{Tf}(f::Tf, A::StructuredMatrix) = _noshapecheck_map(f, _sparsifystructured(A))
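For sizing the destination's storage, the _maxnnzfrom/_unchecked_maxnnzbcres helpers adjusted earlier in this file bound the number of stored entries a broadcast result can hold: each input contributes at most its own nnz multiplied by the factor by which each of its singleton dimensions expands, and the sum is capped at the dense length. A small numeric sketch of that bound (shapes and counts are illustrative):

    m, n = 4, 3                                     # result shape
    nnzA, nnzB = 2, 1                               # A is 4x1 with 2 stored entries, B is 1x3 with 1
    maxnnzfromA = nnzA * div(m, 4) * div(n, 1)      # 2 * 1 * 3 = 6
    maxnnzfromB = nnzB * div(m, 1) * div(n, 3)      # 1 * 4 * 1 = 4
    bound = min(m * n, maxnnzfromA + maxnnzfromB)   # capped at the dense length 12, so 10
    @assert bound == 10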
76 changes: 68 additions & 8 deletions test/sparse/higherorderfns.jl
@@ -6,7 +6,6 @@

@testset "map[!] implementation specialized for a single (input) sparse vector/matrix" begin
N, M = 10, 12
# (also the implementation for broadcast[!] over a single (input) sparse vector/matrix)
for shapeA in ((N,), (N, M))
A = sprand(shapeA..., 0.4); fA = Array(A)
# --> test map entry point
@@ -86,18 +85,77 @@ end
@test let z = 0, fz = 0; broadcast!(() -> z += 1, C) == broadcast!(() -> fz += 1, fC); end
end

@testset "broadcast[!] implementation specialized for a single (input) sparse vector/matrix" begin
# broadcast[!] for a single sparse vector/matrix falls back to map[!], tested extensively
# above. here we simply lightly exercise the relevant broadcast[!] entry points.
@testset "broadcast implementation specialized for a single (input) sparse vector/matrix" begin
# broadcast for a single (input) sparse vector/matrix falls back to map, tested
# extensively above. here we simply lightly exercise the relevant broadcast entry
# point.
N, M, p = 10, 12, 0.4
a, A = sprand(N, p), sprand(N, M, p)
fa, fA = Array(a), Array(A)
@test broadcast(sin, a) == sparse(broadcast(sin, fa))
@test broadcast(sin, A) == sparse(broadcast(sin, fA))
@test broadcast!(sin, copy(a), a) == sparse(broadcast!(sin, copy(fa), fa))
@test broadcast!(sin, copy(A), A) == sparse(broadcast!(sin, copy(fA), fA))
@test broadcast!(identity, copy(a), a) == sparse(broadcast!(identity, copy(fa), fa))
@test broadcast!(identity, copy(A), A) == sparse(broadcast!(identity, copy(fA), fA))
end

@testset "broadcast! implementation specialized for a single (input) sparse vector/matrix" begin
N, M, p = 10, 12, 0.3
f(x, y) = x + y + 1
mats = (sprand(N, M, p), sprand(N, 1, p), sprand(1, M, p), sprand(1, 1, 1.0), spzeros(1, 1))
vecs = (sprand(N, p), sprand(1, 1.0), spzeros(1))
# --> test with matrix destination (Z/fZ)
fZ = Array(first(mats))
for Xo in (mats..., vecs...)
X = ndims(Xo) == 1 ? SparseVector{Float32,Int32}(Xo) : SparseMatrixCSC{Float32,Int32}(Xo)
shapeX, fX = size(X), Array(X)
# --> test broadcast! entry point / zero-preserving op
broadcast!(sin, fZ, fX); Z = sparse(fZ)
broadcast!(sin, Z, X); Z = sparse(fZ) # warmup for @allocated
@test (@allocated broadcast!(sin, Z, X)) == 0
@test broadcast!(sin, Z, X) == sparse(broadcast!(sin, fZ, fX))
# --> test broadcast! entry point / not-zero-preserving op
broadcast!(cos, fZ, fX); Z = sparse(fZ)
broadcast!(cos, Z, X); Z = sparse(fZ) # warmup for @allocated
@test (@allocated broadcast!(cos, Z, X)) == 0
@test broadcast!(cos, Z, X) == sparse(broadcast!(cos, fZ, fX))
# --> test shape checks for broadcast! entry point
# TODO strengthen this test, avoiding dependence on checking whether
# broadcast_indices throws to determine whether sparse broadcast should throw
try
Base.Broadcast.check_broadcast_indices(indices(Z), spzeros((shapeX .- 1)...))
catch
@test_throws DimensionMismatch broadcast!(sin, Z, spzeros((shapeX .- 1)...))
end
end
# --> test with vector destination (V/fV)
fV = Array(first(vecs))
for Xo in vecs # vector target
X = SparseVector{Float32,Int32}(Xo)
shapeX, fX = size(X), Array(X)
# --> test broadcast! entry point / zero-preserving op
broadcast!(sin, fV, fX); V = sparse(fV)
broadcast!(sin, V, X); V = sparse(fV) # warmup for @allocated
@test (@allocated broadcast!(sin, V, X)) == 0
@test broadcast!(sin, V, X) == sparse(broadcast!(sin, fV, fX))
# --> test broadcast! entry point / not-zero-preserving
broadcast!(cos, fV, fX); V = sparse(fV)
broadcast!(cos, V, X); V = sparse(fV) # warmup for @allocated
@test (@allocated broadcast!(cos, V, X)) == 0
@test broadcast!(cos, V, X) == sparse(broadcast!(cos, fV, fX))
# --> test shape checks for broadcast! entry point
# TODO strengthen this test, avoiding dependence on checking whether
# broadcast_indices throws to determine whether sparse broadcast should throw
try
Base.Broadcast.check_broadcast_indices(indices(V), spzeros((shapeX .- 1)...))
catch
@test_throws DimensionMismatch broadcast!(sin, V, spzeros((shapeX .- 1)...))
end
end
# Tests specific to #19895, i.e. for broadcast!(identity, C, A) specializations
Z = copy(first(mats)); fZ = Array(Z)
V = copy(first(vecs)); fV = Array(V)
for X in (mats..., vecs...)
@test broadcast!(identity, Z, X) == sparse(broadcast!(identity, fZ, Array(X)))
X isa SparseVector && @test broadcast!(identity, V, X) == sparse(broadcast!(identity, fV, Array(X)))
end
end

@testset "broadcast[!] implementation specialized for pairs of (input) sparse vectors/matrices" begin
@@ -192,6 +250,8 @@ end
# up with --track-allocation=user. allocation shows up on the first line of the
# entry point for broadcast! with --track-allocation=all, but that first line
# almost certainly should not allocate. so not certain what's going on.
# additional info: occurs for broadcast!(f, Z, X) for Z and X of different
# shape, but not for Z and X of the same shape.
@test broadcast!(f, Q, X, Y, Z) == sparse(broadcast!(f, fQ, fX, fY, fZ))
# --> test shape checks for both broadcast and broadcast! entry points
# TODO strengthen this test, avoiding dependence on checking whether
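The shape-check tests above only assert a throw when Base.Broadcast.check_broadcast_indices itself throws (see the TODOs). The user-visible contract they guard is that sparse broadcast! raises DimensionMismatch for shapes that can neither match nor expand, as in this sketch (sizes illustrative; assumes a build containing this change):

    using Base.Test

    C = spzeros(4, 3)
    A = sprand(3, 3, 0.4)    # three rows can neither match four rows nor expand via a singleton
    @test_throws DimensionMismatch broadcast!(sin, C, A)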