diff --git a/NEWS.md b/NEWS.md
index 429b063162805..50bef68b74857 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -128,6 +128,8 @@ Deprecated or removed

   * `issym` is deprecated in favor of `issymmetric` to match similar functions (`ishermitian`, ...) ([#15192])

+  * `scale` is deprecated in favor of either `α*A`, `Diagonal(x)*A`, or `A*Diagonal(x)`. ([#15258])
+
 Julia v0.4.0 Release Notes
 ==========================
diff --git a/base/deprecated.jl b/base/deprecated.jl
index 2aea580bfa700..9c80f62304284 100644
--- a/base/deprecated.jl
+++ b/base/deprecated.jl
@@ -993,3 +993,9 @@ export call

 # Changed issym to issymmetric. #15192
 @deprecate issym issymmetric
+
+# 15258
+@deprecate scale(α::Number, A::AbstractArray) α*A
+@deprecate scale(A::AbstractArray, α::Number) A*α
+@deprecate scale(A::AbstractMatrix, x::AbstractVector) A*Diagonal(x)
+@deprecate scale(x::AbstractVector, A::AbstractMatrix) Diagonal(x)*A
diff --git a/base/docs/helpdb/Base.jl b/base/docs/helpdb/Base.jl
index 19a393f83d248..02d356aff10f3 100644
--- a/base/docs/helpdb/Base.jl
+++ b/base/docs/helpdb/Base.jl
@@ -4382,20 +4382,6 @@ i-th dimension of `A` should be repeated.
 """
 repeat

-"""
-    scale(A, b)
-    scale(b, A)
-
-Scale an array `A` by a scalar `b`, returning a new array.
-
-If `A` is a matrix and `b` is a vector, then `scale(A,b)` scales each column `i` of `A` by
-`b[i]` (similar to `A*diagm(b)`), while `scale(b,A)` scales each row `i` of `A` by `b[i]`
-(similar to `diagm(b)*A`), returning a new array.
-
-Note: for large `A`, `scale` can be much faster than `A .* b` or `b .* A`, due to the use of BLAS.
-"""
-scale
-
 """
     ReentrantLock()
@@ -6680,12 +6666,11 @@ issetuid
     scale!(A, b)
     scale!(b, A)

-Scale an array `A` by a scalar `b`, similar to [`scale`](:func:`scale`) but overwriting `A`
-in-place.
+Scale an array `A` by a scalar `b` overwriting `A` in-place.

 If `A` is a matrix and `b` is a vector, then `scale!(A,b)` scales each column `i` of `A` by
-`b[i]` (similar to `A*diagm(b)`), while `scale!(b,A)` scales each row `i` of `A` by `b[i]`
-(similar to `diagm(b)*A`), again operating in-place on `A`.
+`b[i]` (similar to `A*Diagonal(b)`), while `scale!(b,A)` scales each row `i` of `A` by `b[i]`
+(similar to `Diagonal(b)*A`), again operating in-place on `A`.
 """
 scale!
diff --git a/base/linalg.jl b/base/linalg.jl
index cec964f13830d..631722bfde676 100644
--- a/base/linalg.jl
+++ b/base/linalg.jl
@@ -111,7 +111,6 @@ export
     lqfact!,
     lqfact,
     rank,
-    scale,
     scale!,
     schur,
     schurfact!,
diff --git a/base/linalg/dense.jl b/base/linalg/dense.jl
index f1919bda8e654..2b43fcfe7331c 100644
--- a/base/linalg/dense.jl
+++ b/base/linalg/dense.jl
@@ -179,7 +179,7 @@ function ^(A::Matrix, p::Number)
     v, X = eig(A)
     any(v.<0) && (v = complex(v))
     Xinv = ishermitian(A) ? X' : inv(X)
-    scale(X, v.^p)*Xinv
+    (X * Diagonal(v.^p)) * Xinv
 end

 # Matrix exponential
@@ -483,7 +483,7 @@ function pinv{T}(A::StridedMatrix{T}, tol::Real)
     index = SVD.S .> tol*maximum(SVD.S)
     Sinv[index] = one(Stype) ./ SVD.S[index]
     Sinv[find(!isfinite(Sinv))] = zero(Stype)
-    return SVD.Vt'scale(Sinv, SVD.U')
+    return SVD.Vt' * (Diagonal(Sinv) * SVD.U')
 end
 function pinv{T}(A::StridedMatrix{T})
     tol = eps(real(float(one(T))))*maximum(size(A))
diff --git a/base/linalg/diagonal.jl b/base/linalg/diagonal.jl
index 69679ea9d07d8..fe537cf305517 100644
--- a/base/linalg/diagonal.jl
+++ b/base/linalg/diagonal.jl
@@ -106,8 +106,10 @@ end
 /{T<:Number}(D::Diagonal, x::T) = Diagonal(D.diag / x)
 *(Da::Diagonal, Db::Diagonal) = Diagonal(Da.diag .* Db.diag)
 *(D::Diagonal, V::AbstractVector) = D.diag .* V
-*(A::AbstractMatrix, D::Diagonal) = scale(A,D.diag)
-*(D::Diagonal, A::AbstractMatrix) = scale(D.diag,A)
+*(A::AbstractMatrix, D::Diagonal) =
+    scale!(similar(A, promote_op(MulFun(), eltype(A), eltype(D.diag))), A, D.diag)
+*(D::Diagonal, A::AbstractMatrix) =
+    scale!(similar(A, promote_op(MulFun(), eltype(A), eltype(D.diag))), D.diag, A)

 A_mul_B!(A::Diagonal,B::AbstractMatrix)  = scale!(A.diag,B)
 At_mul_B!(A::Diagonal,B::AbstractMatrix) = scale!(A.diag,B)
@@ -181,8 +183,8 @@ function A_ldiv_B!(D::Diagonal, B::StridedVecOrMat)
     end
     return B
 end
-(\)(D::Diagonal, B::AbstractMatrix) = scale(1 ./ D.diag, B)
-(\)(D::Diagonal, b::AbstractVector) = reshape(scale(1 ./ D.diag, reshape(b, length(b), 1)), length(b))
+(\)(D::Diagonal, A::AbstractMatrix) = D.diag .\ A
+(\)(D::Diagonal, b::AbstractVector) = D.diag .\ b
 (\)(Da::Diagonal, Db::Diagonal) = Diagonal(Db.diag ./ Da.diag)

 function inv{T}(D::Diagonal{T})
diff --git a/base/linalg/generic.jl b/base/linalg/generic.jl
index ae0d89e3ac61f..9042faf621272 100644
--- a/base/linalg/generic.jl
+++ b/base/linalg/generic.jl
@@ -2,9 +2,6 @@

 ## linalg.jl: Some generic Linear Algebra definitions

-scale(X::AbstractArray, s::Number) = X*s
-scale(s::Number, X::AbstractArray) = s*X
-
 # For better performance when input and output are the same array
 # See https://github.com/JuliaLang/julia/issues/8415#issuecomment-56608729
 function generic_scale!(X::AbstractArray, s::Number)
diff --git a/base/linalg/matmul.jl b/base/linalg/matmul.jl
index ca97c342ffbc6..a11017f3404f3 100644
--- a/base/linalg/matmul.jl
+++ b/base/linalg/matmul.jl
@@ -38,8 +38,6 @@ function scale!(C::AbstractMatrix, b::AbstractVector, A::AbstractMatrix)
     end
     C
 end
-scale(A::AbstractMatrix, b::AbstractVector) = scale!(similar(A, promote_op(MulFun(),eltype(A),eltype(b))), A, b)
-scale(b::AbstractVector, A::AbstractMatrix) = scale!(similar(b, promote_op(MulFun(),eltype(b),eltype(A)), size(A)), b, A)

 # Dot products
diff --git a/base/sparse.jl b/base/sparse.jl
index 56ca4cdaf5a6c..8ea18173d8187 100644
--- a/base/sparse.jl
+++ b/base/sparse.jl
@@ -21,7 +21,7 @@ import Base: @get!, acos, acosd, acot, acotd, acsch, asech, asin, asind, asinh,
     exp, expm1, factorize, find, findmax, findmin, findnz, float, full, getindex,
     hcat, hvcat, imag, indmax, ishermitian, kron, length, log, log1p, max, min,
     maximum, minimum, norm, one, promote_eltype, real, reinterpret, reshape, rot180,
-    rotl90, rotr90, round, scale, scale!, setindex!, similar, size, transpose, tril,
+    rotl90, rotr90, round, scale!, setindex!, similar, size, transpose, tril,
     triu, vcat, vec

 import Base.Broadcast: eltype_plus, broadcast_shape
diff --git a/base/sparse/cholmod.jl b/base/sparse/cholmod.jl
index 8388c351d866c..dbf65105356fb 100644
--- a/base/sparse/cholmod.jl
+++ b/base/sparse/cholmod.jl
@@ -1020,7 +1020,7 @@ function sparse(F::Factor)
     else
         LD = sparse(F[:LD])
         L, d = getLd!(LD)
-        A = scale(L, d)*L'
+        A = (L * Diagonal(d)) * L'
     end
     SparseArrays.sortSparseMatrixCSC!(A)
     p = get_perm(F)
diff --git a/base/sparse/linalg.jl b/base/sparse/linalg.jl
index cc185d3940b57..ba38612573135 100644
--- a/base/sparse/linalg.jl
+++ b/base/sparse/linalg.jl
@@ -826,12 +826,6 @@ end
 scale!(A::SparseMatrixCSC, b::Number) = (scale!(A.nzval, b); A)
 scale!(b::Number, A::SparseMatrixCSC) = (scale!(b, A.nzval); A)

-scale{Tv,Ti,T}(A::SparseMatrixCSC{Tv,Ti}, b::Vector{T}) =
-    scale!(similar(A, promote_type(Tv,T)), A, b)
-
-scale{T,Tv,Ti}(b::Vector{T}, A::SparseMatrixCSC{Tv,Ti}) =
-    scale!(similar(A, promote_type(Tv,T)), b, A)
-
 function factorize(A::SparseMatrixCSC)
     m, n = size(A)
     if m == n
diff --git a/base/sparse/sparsevector.jl b/base/sparse/sparsevector.jl
index 3798d87fdf744..da54b2861980e 100644
--- a/base/sparse/sparsevector.jl
+++ b/base/sparse/sparsevector.jl
@@ -1191,18 +1191,11 @@ scale!(x::AbstractSparseVector, a::Complex) = (scale!(nonzeros(x), a); x)
 scale!(a::Real, x::AbstractSparseVector) = scale!(nonzeros(x), a)
 scale!(a::Complex, x::AbstractSparseVector) = scale!(nonzeros(x), a)

-scale(x::AbstractSparseVector, a::Real) =
-    SparseVector(length(x), copy(nonzeroinds(x)), scale(nonzeros(x), a))
-scale(x::AbstractSparseVector, a::Complex) =
-    SparseVector(length(x), copy(nonzeroinds(x)), scale(nonzeros(x), a))
-
-scale(a::Real, x::AbstractSparseVector) = scale(x, a)
-scale(a::Complex, x::AbstractSparseVector) = scale(x, a)
-
-*(x::AbstractSparseVector, a::Number) = scale(x, a)
-*(a::Number, x::AbstractSparseVector) = scale(x, a)
-.*(x::AbstractSparseVector, a::Number) = scale(x, a)
-.*(a::Number, x::AbstractSparseVector) = scale(x, a)
+*(x::AbstractSparseVector, a::Number) = SparseVector(length(x), copy(nonzeroinds(x)), nonzeros(x) * a)
+*(a::Number, x::AbstractSparseVector) = SparseVector(length(x), copy(nonzeroinds(x)), a * nonzeros(x))
+.*(x::AbstractSparseVector, a::Number) = SparseVector(length(x), copy(nonzeroinds(x)), nonzeros(x) .* a)
+.*(a::Number, x::AbstractSparseVector) = SparseVector(length(x), copy(nonzeroinds(x)), a .* nonzeros(x))

 # dot
diff --git a/doc/stdlib/linalg.rst b/doc/stdlib/linalg.rst
index f38976a82823b..db31625bfa247 100644
--- a/doc/stdlib/linalg.rst
+++ b/doc/stdlib/linalg.rst
@@ -823,25 +823,14 @@ Linear algebra functions in Julia are largely implemented by calling functions f

    Construct a diagonal matrix and place ``v`` on the ``k``\ th diagonal.

-.. function:: scale(A, b)
-              scale(b, A)
-
-   .. Docstring generated from Julia source
-
-   Scale an array ``A`` by a scalar ``b``\ , returning a new array.
-
-   If ``A`` is a matrix and ``b`` is a vector, then ``scale(A,b)`` scales each column ``i`` of ``A`` by ``b[i]`` (similar to ``A*diagm(b)``\ ), while ``scale(b,A)`` scales each row ``i`` of ``A`` by ``b[i]`` (similar to ``diagm(b)*A``\ ), returning a new array.
-
-   Note: for large ``A``\ , ``scale`` can be much faster than ``A .* b`` or ``b .* A``\ , due to the use of BLAS.
-
 .. function:: scale!(A, b)
               scale!(b, A)

    .. Docstring generated from Julia source

-   Scale an array ``A`` by a scalar ``b``\ , similar to :func:`scale` but overwriting ``A`` in-place.
+   Scale an array ``A`` by a scalar ``b`` overwriting ``A`` in-place.

-   If ``A`` is a matrix and ``b`` is a vector, then ``scale!(A,b)`` scales each column ``i`` of ``A`` by ``b[i]`` (similar to ``A*diagm(b)``\ ), while ``scale!(b,A)`` scales each row ``i`` of ``A`` by ``b[i]`` (similar to ``diagm(b)*A``\ ), again operating in-place on ``A``\ .
+   If ``A`` is a matrix and ``b`` is a vector, then ``scale!(A,b)`` scales each column ``i`` of ``A`` by ``b[i]`` (similar to ``A*Diagonal(b)``\ ), while ``scale!(b,A)`` scales each row ``i`` of ``A`` by ``b[i]`` (similar to ``Diagonal(b)*A``\ ), again operating in-place on ``A``\ .

 .. function:: Tridiagonal(dl, d, du)
diff --git a/test/blas.jl b/test/blas.jl
index 8b7d0d1680c3e..6671a919f0d75 100644
--- a/test/blas.jl
+++ b/test/blas.jl
@@ -113,7 +113,7 @@ for elty in [Float32, Float64, Complex64, Complex128]

         # scal
         α = rand(elty)
-        @test BLAS.scal(n,α,a,1) ≈ scale(α,a)
+        @test BLAS.scal(n,α,a,1) ≈ α * a

         # trsv
         A = triu(rand(elty,n,n))
diff --git a/test/linalg/eigen.jl b/test/linalg/eigen.jl
index eb8063ac5f098..cbddd64b93044 100644
--- a/test/linalg/eigen.jl
+++ b/test/linalg/eigen.jl
@@ -50,7 +50,7 @@ debug && println("symmetric generalized eigenproblem")
         asym_sg = asym[1:n1, 1:n1]
         a_sg = a[:,n1+1:n2]
         f = eigfact(asym_sg, a_sg'a_sg)
-        @test_approx_eq asym_sg*f[:vectors] scale(a_sg'a_sg*f[:vectors], f[:values])
+        @test_approx_eq asym_sg*f[:vectors] (a_sg'a_sg*f[:vectors]) * Diagonal(f[:values])
         @test_approx_eq f[:values] eigvals(asym_sg, a_sg'a_sg)
         @test_approx_eq_eps prod(f[:values]) prod(eigvals(asym_sg/(a_sg'a_sg))) 200ε
         @test eigvecs(asym_sg, a_sg'a_sg) == f[:vectors]
@@ -66,7 +66,7 @@ debug && println("Non-symmetric generalized eigenproblem")
         a1_nsg = a[1:n1, 1:n1]
         a2_nsg = a[n1+1:n2, n1+1:n2]
         f = eigfact(a1_nsg, a2_nsg)
-        @test_approx_eq a1_nsg*f[:vectors] scale(a2_nsg*f[:vectors], f[:values])
+        @test_approx_eq a1_nsg*f[:vectors] (a2_nsg*f[:vectors]) * Diagonal(f[:values])
         @test_approx_eq f[:values] eigvals(a1_nsg, a2_nsg)
         @test_approx_eq_eps prod(f[:values]) prod(eigvals(a1_nsg/a2_nsg)) 50000ε
         @test eigvecs(a1_nsg, a2_nsg) == f[:vectors]
diff --git a/test/linalg/generic.jl b/test/linalg/generic.jl
index 3b33f222d7725..8034dae36954b 100644
--- a/test/linalg/generic.jl
+++ b/test/linalg/generic.jl
@@ -105,26 +105,6 @@ let aa = reshape([1.:6;], (2,3))
         a = sub(aa, 1:2, 1:2)
     end

-    # 2-argument version of scale
-    @test scale(a, 5.) == a*5
-    @test scale(5., a) == a*5
-    @test scale([1.; 2.], a) == a.*[1; 2]
-    @test scale([1; 2], a) == a.*[1; 2]
-    @test scale(eye(Int, 2), 0.5) == 0.5*eye(2)
-    @test scale([1; 2], sub(a, :, :)) == a.*[1; 2]
-    @test scale(sub([1; 2], :), a) == a.*[1; 2]
-    @test_throws DimensionMismatch scale(ones(3), a)
-
-    if atype == "Array"
-        @test scale(a, [1.; 2.; 3.]) == a.*[1 2 3]
-        @test scale(a, [1; 2; 3]) == a.*[1 2 3]
-        @test_throws DimensionMismatch scale(a, ones(2))
-    else
-        @test scale(a, [1.; 2.]) == a.*[1 2]
-        @test scale(a, [1; 2]) == a.*[1 2]
-        @test_throws DimensionMismatch scale(a, ones(3))
-    end
-
     # 2-argument version of scale!
     @test scale!(copy(a), 5.) == a*5
     @test scale!(5., copy(a)) == a*5
@@ -168,15 +148,15 @@ end

 # scale real matrix by complex type
 @test_throws InexactError scale!([1.0], 2.0im)
-@test isequal(scale([1.0], 2.0im), Complex{Float64}[2.0im])
-@test isequal(scale(2.0im, [1.0]), Complex{Float64}[2.0im])
-@test isequal(scale(Float32[1.0], 2.0f0im), Complex{Float32}[2.0im])
-@test isequal(scale(Float32[1.0], 2.0im), Complex{Float64}[2.0im])
-@test isequal(scale(Float64[1.0], 2.0f0im), Complex{Float64}[2.0im])
-@test isequal(scale(Float32[1.0], big(2.0)im), Complex{BigFloat}[2.0im])
-@test isequal(scale(Float64[1.0], big(2.0)im), Complex{BigFloat}[2.0im])
-@test isequal(scale(BigFloat[1.0], 2.0im), Complex{BigFloat}[2.0im])
-@test isequal(scale(BigFloat[1.0], 2.0f0im), Complex{BigFloat}[2.0im])
+@test isequal([1.0] * 2.0im, Complex{Float64}[2.0im])
+@test isequal(2.0im * [1.0], Complex{Float64}[2.0im])
+@test isequal(Float32[1.0] * 2.0f0im, Complex{Float32}[2.0im])
+@test isequal(Float32[1.0] * 2.0im, Complex{Float64}[2.0im])
+@test isequal(Float64[1.0] * 2.0f0im, Complex{Float64}[2.0im])
+@test isequal(Float32[1.0] * big(2.0)im, Complex{BigFloat}[2.0im])
+@test isequal(Float64[1.0] * big(2.0)im, Complex{BigFloat}[2.0im])
+@test isequal(BigFloat[1.0] * 2.0im, Complex{BigFloat}[2.0im])
+@test isequal(BigFloat[1.0] * 2.0f0im, Complex{BigFloat}[2.0im])

 # test scale and scale! for non-commutative multiplication
 q = Quaternion([0.44567, 0.755871, 0.882548, 0.423612])
diff --git a/test/linalg/lapack.jl b/test/linalg/lapack.jl
index c22a4768236a5..8b33caec387ea 100644
--- a/test/linalg/lapack.jl
+++ b/test/linalg/lapack.jl
@@ -16,7 +16,7 @@ let # syevr
         A = convert(Array{elty, 2}, A)
         Asym = A'A
         vals, Z = LAPACK.syevr!('V', copy(Asym))
-        @test_approx_eq Z*scale(vals, Z') Asym
+        @test_approx_eq Z * (Diagonal(vals) * Z') Asym
         @test all(vals .> 0.0)
         @test_approx_eq LAPACK.syevr!('N','V','U',copy(Asym),0.0,1.0,4,5,-1.0)[1] vals[vals .< 1.0]
         @test_approx_eq LAPACK.syevr!('N','I','U',copy(Asym),0.0,1.0,4,5,-1.0)[1] vals[4:5]
diff --git a/test/linalg/svd.jl b/test/linalg/svd.jl
index 9431cf0d0884b..86985a429557b 100644
--- a/test/linalg/svd.jl
+++ b/test/linalg/svd.jl
@@ -30,7 +30,7 @@ debug && println("\ntype of a: ", eltya, "\n")
 debug && println("singular value decomposition")
         usv = svdfact(a)
         @test usv[:S] === svdvals(usv)
-        @test usv[:U]*scale(usv[:S],usv[:Vt]) ≈ a
+        @test usv[:U] * (Diagonal(usv[:S]) * usv[:Vt]) ≈ a
         @test full(usv) ≈ a
         @test usv[:Vt]' ≈ usv[:V]
         @test_throws KeyError usv[:Z]
diff --git a/test/linalg/triangular.jl b/test/linalg/triangular.jl
index fe00174b1e55e..bff1e05d4b050 100644
--- a/test/linalg/triangular.jl
+++ b/test/linalg/triangular.jl
@@ -207,12 +207,6 @@ for elty1 in (Float32, Float64, BigFloat, Complex64, Complex128, Complex{BigFloa
         end
     end

-    @test scale(A1,0.5) == 0.5*A1
-    @test scale(0.5,A1) == 0.5*A1
-    @test scale(A1,0.5im) == 0.5im*A1
-    @test scale(0.5im,A1) == 0.5im*A1
-
-
     # Binary operations
     @test A1*0.5 == full(A1)*0.5
     @test 0.5*A1 == 0.5*full(A1)
diff --git a/test/sparsedir/sparse.jl b/test/sparsedir/sparse.jl
index e147b19f940cf..56f498d6484da 100644
--- a/test/sparsedir/sparse.jl
+++ b/test/sparsedir/sparse.jl
@@ -228,20 +228,20 @@ sA = sprandn(3, 7, 0.5)
 sC = similar(sA)
 dA = full(sA)
 b = randn(7)
-@test scale(dA, b) == scale(sA, b)
-@test scale(dA, b) == scale!(sC, sA, b)
-@test scale(dA, b) == scale!(copy(sA), b)
+@test dA * Diagonal(b) == sA * Diagonal(b)
+@test dA * Diagonal(b) == scale!(sC, sA, b)
+@test dA * Diagonal(b) == scale!(copy(sA), b)
 b = randn(3)
-@test scale(b, dA) == scale(b, sA)
-@test scale(b, dA) == scale!(sC, b, sA)
-@test scale(b, dA) == scale!(b, copy(sA))
-
-@test scale(dA, 0.5) == scale(sA, 0.5)
-@test scale(dA, 0.5) == scale!(sC, sA, 0.5)
-@test scale(dA, 0.5) == scale!(copy(sA), 0.5)
-@test scale(0.5, dA) == scale(0.5, sA)
-@test scale(0.5, dA) == scale!(sC, sA, 0.5)
-@test scale(0.5, dA) == scale!(0.5, copy(sA))
+@test Diagonal(b) * dA == Diagonal(b) * sA
+@test Diagonal(b) * dA == scale!(sC, b, sA)
+@test Diagonal(b) * dA == scale!(b, copy(sA))
+
+@test dA * 0.5 == sA * 0.5
+@test dA * 0.5 == scale!(sC, sA, 0.5)
+@test dA * 0.5 == scale!(copy(sA), 0.5)
+@test 0.5 * dA == 0.5 * sA
+@test 0.5 * dA == scale!(sC, sA, 0.5)
+@test 0.5 * dA == scale!(0.5, copy(sA))
 @test scale!(sC, 0.5, sA) == scale!(sC, sA, 0.5)

 # copy!
diff --git a/test/sparsedir/sparsevector.jl b/test/sparsedir/sparsevector.jl
index 069cf5c67eb45..f43aaab188aa8 100644
--- a/test/sparsedir/sparsevector.jl
+++ b/test/sparsedir/sparsevector.jl
@@ -580,10 +580,10 @@ let x = sprand(16, 0.5), x2 = sprand(16, 0.4)

     # scale
     let sx = SparseVector(x.n, x.nzind, x.nzval * 2.5)
-        @test exact_equal(scale(x, 2.5), sx)
-        @test exact_equal(scale(x, 2.5 + 0.0*im), complex(sx))
-        @test exact_equal(scale(2.5, x), sx)
-        @test exact_equal(scale(2.5 + 0.0*im, x), complex(sx))
+        @test exact_equal(x * 2.5, sx)
+        @test exact_equal(x * (2.5 + 0.0*im), complex(sx))
+        @test exact_equal(2.5 * x, sx)
+        @test exact_equal((2.5 + 0.0*im) * x, complex(sx))
         @test exact_equal(x * 2.5, sx)
         @test exact_equal(2.5 * x, sx)
         @test exact_equal(x .* 2.5, sx)
diff --git a/test/sparsedir/umfpack.jl b/test/sparsedir/umfpack.jl
index 0a6b46e57ab82..deb69dc75d157 100644
--- a/test/sparsedir/umfpack.jl
+++ b/test/sparsedir/umfpack.jl
@@ -19,7 +19,7 @@ for Tv in (Float64, Complex128)
     lua = lufact(A)
     @test nnz(lua) == 18
     L,U,p,q,Rs = lua[:(:)]
-    @test_approx_eq scale(Rs,A)[p,q] L*U
+    @test_approx_eq (Diagonal(Rs) * A)[p,q] L * U

     @test_approx_eq det(lua) det(full(A))

@@ -45,7 +45,7 @@ for Ti in Base.SparseArrays.UMFPACK.UMFITypes.types
     Ac = convert(SparseMatrixCSC{Complex128,Ti}, Ac0)
     lua = lufact(Ac)
     L,U,p,q,Rs = lua[:(:)]
-    @test_approx_eq scale(Rs,Ac)[p,q] L*U
+    @test_approx_eq (Diagonal(Rs) * Ac)[p,q] L * U
 end

 for elty in (Float64, Complex128)
@@ -53,7 +53,7 @@ for elty in (Float64, Complex128)
     A = sparse([1:min(m,n); rand(1:m, 10)], [1:min(m,n); rand(1:n, 10)], elty == Float64 ? randn(min(m, n) + 10) : complex(randn(min(m, n) + 10), randn(min(m, n) + 10)))
     F = lufact(A)
     L, U, p, q, Rs = F[:(:)]
-    @test_approx_eq scale(Rs,A)[p,q] L*U
+    @test_approx_eq (Diagonal(Rs) * A)[p,q] L * U
 end
 end
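
For reviewers migrating downstream code, here is a minimal usage sketch of how the deprecated `scale` calls map onto the replacements used throughout this diff. It is illustrative only and not part of the patch; the matrix `A`, vector `x`, and scalar `α` are arbitrary example values.

    # Illustrative sketch — arbitrary example data, not taken from the patch.
    A = rand(3, 3)        # example matrix
    x = rand(3)           # example vector
    α = 2.0               # example scalar

    α * A                 # was scale(α, A): multiply every entry by α
    A * α                 # was scale(A, α)
    A * Diagonal(x)       # was scale(A, x): scales column i of A by x[i]
    Diagonal(x) * A       # was scale(x, A): scales row i of A by x[i]

    # The in-place methods keep their names and behavior:
    scale!(copy(A), α)    # scales every entry of the copy by α in place
    scale!(copy(A), x)    # scales column i of the copy by x[i] in place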