Skip to content

Commit

Permalink
buildkite for GNNlib + get_graph_type (#530)
Browse files Browse the repository at this point in the history
  • Loading branch information
CarloLucibello authored Nov 30, 2024
1 parent 3ed702b commit 49b6398
Show file tree
Hide file tree
Showing 19 changed files with 658 additions and 200 deletions.
51 changes: 51 additions & 0 deletions .buildkite/pipeline.yml
Original file line number Diff line number Diff line change
Expand Up @@ -49,3 +49,54 @@ steps:
GNN_TEST_AMDGPU: "true"
GNN_TEST_CPU: "false"
timeout_in_minutes: 60

  # GNNlib test suite on NVIDIA GPUs: dev's the local GNNGraphs/GNNlib packages,
  # adds CUDA + cuDNN, and runs only the GPU tests (CPU tests disabled via env).
  - label: "GNNlib CUDA"
    plugins:
      - JuliaCI/julia#v1:
          version: "1"
      - JuliaCI/julia-coverage#v1:
          dirs:
            - GNNlib/src
    command: |
      julia --color=yes --depwarn=yes --project=GNNlib/test -e '
        import Pkg
        dev_pkgs = Pkg.PackageSpec[]
        for pkg in ("GNNGraphs", "GNNlib")
          push!(dev_pkgs, Pkg.PackageSpec(path=pkg));
        end
        Pkg.develop(dev_pkgs)
        Pkg.add(["CUDA", "cuDNN"])
        Pkg.test("GNNlib")'
    # Target the JuliaGPU Buildkite queue; any CUDA-capable agent is acceptable.
    agents:
      queue: "juliagpu"
      cuda: "*"
    env:
      GNN_TEST_CUDA: "true"
      GNN_TEST_CPU: "false"
    timeout_in_minutes: 60

  # GNNlib test suite on AMD GPUs: dev's the local GNNGraphs/GNNlib packages,
  # adds AMDGPU, and runs only the GPU tests (CPU tests disabled via env).
  - label: "GNNlib AMDGPU"
    plugins:
      - JuliaCI/julia#v1:
          version: "1"
      - JuliaCI/julia-coverage#v1:
          dirs:
            - GNNlib/src
    command: |
      julia --color=yes --depwarn=yes --project=GNNlib/test -e '
        import Pkg
        dev_pkgs = Pkg.PackageSpec[]
        for pkg in ("GNNGraphs", "GNNlib")
          push!(dev_pkgs, Pkg.PackageSpec(path=pkg));
        end
        Pkg.develop(dev_pkgs)
        Pkg.add(["AMDGPU"])
        Pkg.test("GNNlib")'
    # Target the JuliaGPU Buildkite queue; any ROCm-capable agent is acceptable.
    agents:
      queue: "juliagpu"
      rocm: "*"
      rocmgpu: "*"
    env:
      GNN_TEST_AMDGPU: "true"
      GNN_TEST_CPU: "false"
    timeout_in_minutes: 60
1 change: 1 addition & 0 deletions GNNGraphs/src/GNNGraphs.jl
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ include("query.jl")
export adjacency_list,
edge_index,
get_edge_weight,
get_graph_type,
graph_indicator,
has_multi_edges,
is_directed,
Expand Down
4 changes: 2 additions & 2 deletions GNNGraphs/src/operators.jl
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,8 @@ Intersect two graphs by keeping only the common edges.
"""
function Base.intersect(g1::GNNGraph, g2::GNNGraph)
@assert g1.num_nodes == g2.num_nodes
@assert graph_type_symbol(g1) == graph_type_symbol(g2)
graph_type = graph_type_symbol(g1)
@assert get_graph_type(g1) == get_graph_type(g2)
graph_type = get_graph_type(g1)
num_nodes = g1.num_nodes

idx1, _ = edge_encoding(edge_index(g1)..., num_nodes)
Expand Down
58 changes: 55 additions & 3 deletions GNNGraphs/src/query.jl
Original file line number Diff line number Diff line change
Expand Up @@ -80,9 +80,61 @@ function Graphs.has_edge(g::GNNHeteroGraph, edge_t::EType, i::Integer, j::Intege
return any((s .== i) .& (t .== j))
end

graph_type_symbol(::GNNGraph{<:COO_T}) = :coo
graph_type_symbol(::GNNGraph{<:SPARSE_T}) = :sparse
graph_type_symbol(::GNNGraph{<:ADJMAT_T}) = :dense
"""
    get_graph_type(g::GNNGraph)

Return the underlying representation for the graph `g` as a symbol.

Possible values are:
- `:coo`: Coordinate list representation. The graph is stored as a tuple of vectors `(s, t, w)`,
  where `s` and `t` are the source and target nodes of the edges, and `w` is the edge weights.
- `:sparse`: Sparse matrix representation. The graph is stored as a sparse matrix representing the weighted adjacency matrix.
- `:dense`: Dense matrix representation. The graph is stored as a dense matrix representing the weighted adjacency matrix.

The default representation for graph constructors in GNNGraphs.jl is `:coo`.
The underlying representation can be accessed through the `g.graph` field.

See also [`GNNGraph`](@ref).

# Examples

The default representation for graph constructors in GNNGraphs.jl is `:coo`.
```jldoctest
julia> g = rand_graph(5, 10)
GNNGraph:
  num_nodes: 5
  num_edges: 10

julia> get_graph_type(g)
:coo
```

The `GNNGraph` constructor can also be used to create graphs with different representations.
```jldoctest
julia> g = GNNGraph([2,3,5], [1,2,4], graph_type=:sparse)
GNNGraph:
  num_nodes: 5
  num_edges: 3

julia> g.graph
5×5 SparseArrays.SparseMatrixCSC{Int64, Int64} with 3 stored entries:
 ⋅  ⋅  ⋅  ⋅  ⋅
 1  ⋅  ⋅  ⋅  ⋅
 ⋅  1  ⋅  ⋅  ⋅
 ⋅  ⋅  ⋅  ⋅  ⋅
 ⋅  ⋅  ⋅  1  ⋅

julia> get_graph_type(g)
:sparse

julia> gcoo = GNNGraph(g, graph_type=:coo);

julia> gcoo.graph
([2, 3, 5], [1, 2, 4], [1, 1, 1])
```
"""
get_graph_type(::GNNGraph{<:COO_T}) = :coo
get_graph_type(::GNNGraph{<:SPARSE_T}) = :sparse
get_graph_type(::GNNGraph{<:ADJMAT_T}) = :dense

# Graphs.jl interface: number of vertices, read from the stored `num_nodes` field.
function Graphs.nv(g::GNNGraph)
    return g.num_nodes
end

# Graphs.jl interface: number of edges, read from the stored `num_edges` field.
function Graphs.ne(g::GNNGraph)
    return g.num_edges
end
Expand Down
1 change: 1 addition & 0 deletions GNNGraphs/src/transform.jl
Original file line number Diff line number Diff line change
Expand Up @@ -731,6 +731,7 @@ end
Set `w` as edge weights in the returned graph.
"""
function set_edge_weight(g::GNNGraph, w::AbstractVector)
# TODO preserve the representation instead of converting to COO
s, t = edge_index(g)
@assert length(w) == length(s)

Expand Down
3 changes: 3 additions & 0 deletions GNNGraphs/test/gnngraph.jl
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
# TODO test that the graph type is preserved
# when constructing a GNNGraph from another

@testset "Constructor: adjacency matrix" begin
A = sprand(10, 10, 0.5)
sA, tA, vA = findnz(A)
Expand Down
17 changes: 17 additions & 0 deletions GNNGraphs/test/query.jl
Original file line number Diff line number Diff line change
Expand Up @@ -257,3 +257,20 @@ if GRAPH_T == :coo
end
end

@testset "get_graph_type" begin
    # The representation symbol must match the type the graph was built with.
    base = rand_graph(10, 20, graph_type = GRAPH_T)
    @test get_graph_type(base) == GRAPH_T

    # Convert to a sparse adjacency-matrix backing and check both the symbol
    # and the concrete storage type.
    g_sparse = GNNGraph(base, graph_type = :sparse)
    @test get_graph_type(g_sparse) == :sparse
    @test g_sparse.graph isa SparseMatrixCSC

    # Convert to the COO backing: `graph` holds (source, target, weight) vectors.
    g_coo = GNNGraph(base, graph_type = :coo)
    @test get_graph_type(g_coo) == :coo
    @test g_coo.graph[1:2] isa Tuple{Vector{Int}, Vector{Int}}

    # Convert to a dense adjacency-matrix backing.
    g_dense = GNNGraph(base, graph_type = :dense)
    @test get_graph_type(g_dense) == :dense
    @test g_dense.graph isa Matrix{Int}
end
18 changes: 9 additions & 9 deletions GNNlib/Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -7,35 +7,35 @@ version = "0.2.2"
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
GNNGraphs = "aed8fd31-079b-4b5a-b342-a13352159b8c"
GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54"
NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"

[weakdeps]
AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"

[extensions]
GNNlibAMDGPUExt = "AMDGPU"
GNNlibCUDAExt = "CUDA"

# GPUArraysCore is not needed as a direct dependency
# but pinning it to 0.1 avoids problems when we do Pkg.add("CUDA") in testing
# See https://github.com/JuliaGPU/CUDA.jl/issues/2564

[compat]
AMDGPU = "1"
CUDA = "4, 5"
ChainRulesCore = "1.24"
DataStructures = "0.18"
GNNGraphs = "1.0"
GPUArraysCore = "0.1"
LinearAlgebra = "1"
MLUtils = "0.4"
NNlib = "0.9"
Random = "1"
Statistics = "1"
julia = "1.10"

[extras]
ReTestItems = "817f1d60-ba6b-4fd5-9520-3cf149f6a823"
Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[targets]
test = ["Test", "ReTestItems", "Reexport", "SparseArrays"]
34 changes: 34 additions & 0 deletions GNNlib/ext/GNNlibAMDGPUExt.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# Package extension loaded when AMDGPU.jl is available. It overrides the
# `propagate` methods specialized on `copy_xj` / `e_mul_xj` / `w_mul_xj` for
# ROCm matrices, re-dispatching through an anonymous closure so that the
# generic (non-fast-path) `propagate` method is used instead.
module GNNlibAMDGPUExt

using AMDGPU: AnyROCMatrix
using Random, Statistics, LinearAlgebra
using GNNlib: GNNlib, propagate, copy_xj, e_mul_xj, w_mul_xj
using GNNGraphs: GNNGraph, COO_T, SPARSE_T

###### PROPAGATE SPECIALIZATIONS ####################

## COPY_XJ

## Avoid the fused fast path on GPU until there is better support.
## NOTE(review): the original comment said "cuda support" although this is the
## AMDGPU extension — presumably copy-pasted from the CUDA extension; confirm.
## Wrapping `copy_xj` in a closure changes the first argument's type, so this
## call selects the generic `propagate` method rather than recursing here.
function GNNlib.propagate(::typeof(copy_xj), g::GNNGraph{<:Union{COO_T, SPARSE_T}}, ::typeof(+),
        xi, xj::AnyROCMatrix, e)
    propagate((xi, xj, e) -> copy_xj(xi, xj, e), g, +, xi, xj, e)
end

## E_MUL_XJ

## Avoid the fast path on GPU until there is better support (same closure
## trick as above to force the generic method).
function GNNlib.propagate(::typeof(e_mul_xj), g::GNNGraph{<:Union{COO_T, SPARSE_T}}, ::typeof(+),
        xi, xj::AnyROCMatrix, e::AbstractVector)
    propagate((xi, xj, e) -> e_mul_xj(xi, xj, e), g, +, xi, xj, e)
end

## W_MUL_XJ

## Avoid the fast path on GPU until there is better support (same closure
## trick; note `e::Nothing` — weights come from the graph, not the edge data).
function GNNlib.propagate(::typeof(w_mul_xj), g::GNNGraph{<:Union{COO_T, SPARSE_T}}, ::typeof(+),
        xi, xj::AnyROCMatrix, e::Nothing)
    propagate((xi, xj, e) -> w_mul_xj(xi, xj, e), g, +, xi, xj, e)
end

end #module
2 changes: 1 addition & 1 deletion GNNlib/src/msgpass.jl
Original file line number Diff line number Diff line change
Expand Up @@ -184,7 +184,7 @@ xj_sub_xi(xi, xj, e) = xj .- xi
"""
e_mul_xj(xi, xj, e) = reshape(e, (...)) .* xj
Reshape `e` into broadcast compatible shape with `xj`
Reshape `e` into a broadcast compatible shape with `xj`
(by prepending singleton dimensions) then perform
broadcasted multiplication.
"""
Expand Down
22 changes: 22 additions & 0 deletions GNNlib/test/Project.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
[deps]
FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
GNNGraphs = "aed8fd31-079b-4b5a-b342-a13352159b8c"
GNNlib = "a6a84749-d869-43f8-aacc-be26a1996e48"
GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527"
Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6"
MLDataDevices = "7e8f7934-dd98-4c1a-8fe8-92b47a384d40"
MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54"
NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
TestItemRunner = "f8b46487-2199-4994-9208-9a1283c18c0a"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"

[compat]
GPUArraysCore = "0.1"
Loading

0 comments on commit 49b6398

Please sign in to comment.