diff --git a/lib/cusparse/conversions.jl b/lib/cusparse/conversions.jl
index f21dd8ace6..c016f9e83c 100644
--- a/lib/cusparse/conversions.jl
+++ b/lib/cusparse/conversions.jl
@@ -1,6 +1,45 @@
 # conversion routines between different sparse and dense storage formats
 
-SparseArrays.sparse(::DenseCuArray, args...) = error("CUSPARSE supports multiple sparse formats, use specific constructors instead (e.g. CuSparseMatrixCSC)")
+"""
+    sparse(x::DenseCuMatrix; fmt=:csc)
+    sparse(I::CuVector, J::CuVector, V::CuVector, [m, n]; fmt=:csc)
+
+Return a sparse CUDA matrix, with storage format determined by `fmt`.
+Possible formats are `:csc`, `:csr`, `:bsr`, and `:coo`.
+"""
+function SparseArrays.sparse(x::DenseCuMatrix; fmt=:csc)
+    if fmt == :csc
+        return CuSparseMatrixCSC(x)
+    elseif fmt == :csr
+        return CuSparseMatrixCSR(x)
+    elseif fmt == :bsr
+        return CuSparseMatrixBSR(x)
+    elseif fmt == :coo
+        return CuSparseMatrixCOO(x)
+    else
+        error("Format :$fmt not available, use :csc, :csr, :bsr or :coo.")
+    end
+end
+
+SparseArrays.sparse(I::CuVector, J::CuVector, V::CuVector; kws...) =
+    sparse(I, J, V, maximum(I), maximum(J); kws...)
+
+SparseArrays.sparse(I::CuVector, J::CuVector, V::CuVector, m, n; kws...) =
+    sparse(Cint.(I), Cint.(J), V, m, n; kws...)
+
+function SparseArrays.sparse(I::CuVector{Cint}, J::CuVector{Cint}, V::CuVector{Tv}, m, n;
+                             fmt=:csc) where Tv
+    spcoo = CuSparseMatrixCOO{Tv}(I, J, V, (m, n))
+    if fmt == :csc
+        return CuSparseMatrixCSC(spcoo)
+    elseif fmt == :csr
+        return CuSparseMatrixCSR(spcoo)
+    elseif fmt == :coo
+        return spcoo
+    else
+        error("Format :$fmt not available, use :csc, :csr, or :coo.")
+    end
+end
 
 ## CSR to CSC
 
@@ -238,6 +277,14 @@ function CuSparseMatrixCOO(csr::CuSparseMatrixCSR{Tv}, ind::SparseChar='O') wher
     CuSparseMatrixCOO{Tv}(cooRowInd, csr.colVal, csr.nzVal, csr.dims, nnz)
 end
 
+### CSC/BSR to COO and vice-versa
+
+CuSparseMatrixCSC(coo::CuSparseMatrixCOO) = CuSparseMatrixCSC(CuSparseMatrixCSR(coo)) # no direct conversion
+CuSparseMatrixCOO(csc::CuSparseMatrixCSC) = CuSparseMatrixCOO(CuSparseMatrixCSR(csc)) # no direct conversion
+CuSparseMatrixBSR(coo::CuSparseMatrixCOO, blockdim) = CuSparseMatrixBSR(CuSparseMatrixCSR(coo), blockdim) # no direct conversion
+CuSparseMatrixCOO(bsr::CuSparseMatrixBSR) = CuSparseMatrixCOO(CuSparseMatrixCSR(bsr)) # no direct conversion
+
+
 ## sparse to dense, and vice-versa
 
 for (cname,rname,elty) in ((:cusparseScsc2dense, :cusparseScsr2dense, :Float32),
diff --git a/test/cusparse/conversions.jl b/test/cusparse/conversions.jl
new file mode 100644
index 0000000000..83190338d8
--- /dev/null
+++ b/test/cusparse/conversions.jl
@@ -0,0 +1,36 @@
+using CUDA.CUSPARSE, SparseArrays
+
+@testset "sparse" begin
+    n, m = 4, 4
+    I = [1,2,3] |> cu
+    J = [2,3,4] |> cu
+    V = Float32[1,2,3] |> cu
+
+    dense = rand(3,3) |> cu
+
+    # check defaults
+    @test sparse(I, J, V) isa CuSparseMatrixCSC
+    @test sparse(dense) isa CuSparseMatrixCSC
+
+    for (fmt, T) in [(:coo, CuSparseMatrixCOO),
+                     (:csc, CuSparseMatrixCSC),
+                     (:csr, CuSparseMatrixCSR),
+                     (:bsr, CuSparseMatrixBSR)
+                    ]
+        if fmt != :bsr # construction from triplets not supported for BSR
+            x = sparse(I, J, V; fmt=fmt)
+            @test x isa T{Float32}
+            @test size(x) == (3, 4)
+
+            x = sparse(I, J, V, m, n; fmt=fmt)
+            @test x isa T{Float32}
+            @test size(x) == (4, 4)
+        end
+
+        if fmt != :coo # dense to COO not implemented
+            x = sparse(dense; fmt=fmt)
+            @test x isa T{Float32}
+            @test collect(x) == collect(dense)
+        end
+    end
+end
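
Usage sketch (not part of the patch): a brief illustration of the `sparse` entry points added above, assuming a CUDA-capable device and that `CUDA`, `CUDA.CUSPARSE`, and `SparseArrays` are loaded; the variable names are illustrative only.

    using CUDA, CUDA.CUSPARSE, SparseArrays

    A = CUDA.rand(Float32, 4, 4)              # dense matrix on the GPU
    Acsc = sparse(A)                          # default format -> CuSparseMatrixCSC
    Acsr = sparse(A; fmt=:csr)                # CuSparseMatrixCSR

    # build a sparse GPU matrix from (I, J, V) triplets
    I = cu(Cint[1, 2, 3]); J = cu(Cint[2, 3, 4]); V = cu(Float32[1, 2, 3])
    Acoo = sparse(I, J, V, 4, 4; fmt=:coo)    # CuSparseMatrixCOO with explicit size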