NeuralODE example working on cpu and gpu #67
Merged
Conversation
It is not working on GPU on my computer:

ERROR: type Tuple has no field layers
Stacktrace:
[1] getproperty
@ .\Base.jl:42 [inlined]
[2] functor(#unused#::Type{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}}, c::Tuple{NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}, NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}})
@ GraphNeuralNetworks C:\Users\Luffy\.julia\packages\GraphNeuralNetworks\4e2gU\src\layers\basic.jl:117
[3] _trainable_biwalk(f::Function, x::GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, aux::Tuple{NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}, NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}})
@ Optimisers C:\Users\Luffy\.julia\packages\Optimisers\jjON8\src\destructure.jl:95
[4] #fmap#30
@ C:\Users\Luffy\.julia\packages\Functors\qBIlC\src\functor.jl:78 [inlined]
[5] _rebuild(x::GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, off::Tuple{NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}, NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}}, flat::CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, len::Int64; walk::Function, kw::Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
@ Optimisers C:\Users\Luffy\.julia\packages\Optimisers\jjON8\src\destructure.jl:84
[6] _rebuild
@ C:\Users\Luffy\.julia\packages\Optimisers\jjON8\src\destructure.jl:83 [inlined]
[7] Restructure
@ C:\Users\Luffy\.julia\packages\Optimisers\jjON8\src\destructure.jl:51 [inlined]
[8] (::GraphNeuralNetworks.var"#re_withgraph#4"{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, Optimisers.Restructure{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, Tuple{NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}, NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}}}})(x::CuArray{Float32, 1, CUDA.Mem.DeviceBuffer})
@ GraphNeuralNetworks C:\Users\Luffy\.julia\packages\GraphNeuralNetworks\4e2gU\src\layers\basic.jl:60
[9] (::DiffEqFlux.var"#dudt_#133"{NeuralODE{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, GraphNeuralNetworks.var"#re_withgraph#4"{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, Optimisers.Restructure{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, Tuple{NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}, NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}}}}, Tuple{Float32, Float32}, Tuple{Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}}, Base.Pairs{Symbol, Real, NTuple{4, Symbol}, NamedTuple{(:save_everystep, :reltol, :abstol, :save_start), Tuple{Bool, Float64, Float64, Bool}}}}})(u::CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, p::CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, t::Float32)
@ DiffEqFlux C:\Users\Luffy\.julia\packages\DiffEqFlux\JicEk\src\neural_de.jl:73
[10] ODEFunction
@ C:\Users\Luffy\.julia\packages\SciMLBase\UEAKN\src\scimlfunctions.jl:1613 [inlined]
[11] initialize!(integrator::OrdinaryDiffEq.ODEIntegrator{Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, false, CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, Nothing, Float32, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, Float32, Float32, Float32, Float32, Vector{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}}, ODESolution{Float32, 3, Vector{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}}, Nothing, Nothing, Vector{Float32}, Vector{Vector{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}}}, ODEProblem{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, Tuple{Float32, Float32}, false, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, ODEFunction{false, DiffEqFlux.var"#dudt_#133"{NeuralODE{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, GraphNeuralNetworks.var"#re_withgraph#4"{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, Optimisers.Restructure{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, Tuple{NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}, NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}}}}, Tuple{Float32, Float32}, Tuple{Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}}, Base.Pairs{Symbol, Real, NTuple{4, Symbol}, NamedTuple{(:save_everystep, :reltol, :abstol, :save_start), Tuple{Bool, Float64, Float64, Bool}}}}}, LinearAlgebra.UniformScaling{Bool}, Nothing, typeof(DiffEqFlux.basic_tgrad), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}, SciMLBase.StandardODEProblem}, Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, OrdinaryDiffEq.InterpolationData{ODEFunction{false, DiffEqFlux.var"#dudt_#133"{NeuralODE{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, GraphNeuralNetworks.var"#re_withgraph#4"{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, 
CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, Optimisers.Restructure{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, Tuple{NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}, NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}}}}, Tuple{Float32, Float32}, Tuple{Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}}, Base.Pairs{Symbol, Real, NTuple{4, Symbol}, NamedTuple{(:save_everystep, :reltol, :abstol, :save_start), Tuple{Bool, Float64, Float64, Bool}}}}}, LinearAlgebra.UniformScaling{Bool}, Nothing, typeof(DiffEqFlux.basic_tgrad), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Vector{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}}, Vector{Float32}, Vector{Vector{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}}}, OrdinaryDiffEq.Tsit5ConstantCache{Float32, Float32}}, DiffEqBase.DEStats}, ODEFunction{false, DiffEqFlux.var"#dudt_#133"{NeuralODE{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, GraphNeuralNetworks.var"#re_withgraph#4"{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, Optimisers.Restructure{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, Tuple{NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}, NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}}}}, Tuple{Float32, Float32}, Tuple{Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}}, Base.Pairs{Symbol, Real, NTuple{4, Symbol}, NamedTuple{(:save_everystep, :reltol, :abstol, :save_start), Tuple{Bool, Float64, Float64, Bool}}}}}, LinearAlgebra.UniformScaling{Bool}, Nothing, typeof(DiffEqFlux.basic_tgrad), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, OrdinaryDiffEq.Tsit5ConstantCache{Float32, Float32}, OrdinaryDiffEq.DEOptions{Float64, Float64, Float32, Float32, PIController{Rational{Int64}}, typeof(DiffEqBase.ODE_DEFAULT_NORM), typeof(LinearAlgebra.opnorm), Bool, CallbackSet{Tuple{}, Tuple{}}, typeof(DiffEqBase.ODE_DEFAULT_ISOUTOFDOMAIN), typeof(DiffEqBase.ODE_DEFAULT_PROG_MESSAGE), typeof(DiffEqBase.ODE_DEFAULT_UNSTABLE_CHECK), 
DataStructures.BinaryHeap{Float32, DataStructures.FasterForward}, DataStructures.BinaryHeap{Float32, DataStructures.FasterForward}, Nothing, Nothing, Int64, Tuple{}, Tuple{}, Tuple{}}, CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, Float32, Nothing, OrdinaryDiffEq.DefaultInit}, cache::OrdinaryDiffEq.Tsit5ConstantCache{Float32, Float32})
@ OrdinaryDiffEq C:\Users\Luffy\.julia\packages\OrdinaryDiffEq\ZgJ9s\src\perform_step\low_order_rk_perform_step.jl:569
[12] __init(prob::ODEProblem{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, Tuple{Float32, Float32}, false, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, ODEFunction{false, DiffEqFlux.var"#dudt_#133"{NeuralODE{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, GraphNeuralNetworks.var"#re_withgraph#4"{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, Optimisers.Restructure{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, Tuple{NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}, NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}}}}, Tuple{Float32, Float32}, Tuple{Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}}, Base.Pairs{Symbol, Real, NTuple{4, Symbol}, NamedTuple{(:save_everystep, :reltol, :abstol, :save_start), Tuple{Bool, Float64, Float64, Bool}}}}}, LinearAlgebra.UniformScaling{Bool}, Nothing, typeof(DiffEqFlux.basic_tgrad), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}, SciMLBase.StandardODEProblem}, alg::Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, timeseries_init::Tuple{}, ts_init::Tuple{}, ks_init::Tuple{}, recompile::Type{Val{true}}; saveat::Tuple{}, tstops::Tuple{}, d_discontinuities::Tuple{}, save_idxs::Nothing, save_everystep::Bool, save_on::Bool, save_start::Bool, save_end::Bool, callback::Nothing, dense::Bool, calck::Bool, dt::Float32, dtmin::Nothing, dtmax::Float32, force_dtmin::Bool, adaptive::Bool, gamma::Rational{Int64}, abstol::Float64, reltol::Float64, qmin::Rational{Int64}, qmax::Int64, qsteady_min::Int64, qsteady_max::Int64, beta1::Nothing, beta2::Nothing, qoldinit::Rational{Int64}, controller::Nothing, fullnormalize::Bool, failfactor::Int64, maxiters::Int64, internalnorm::typeof(DiffEqBase.ODE_DEFAULT_NORM), internalopnorm::typeof(LinearAlgebra.opnorm), isoutofdomain::typeof(DiffEqBase.ODE_DEFAULT_ISOUTOFDOMAIN), unstable_check::typeof(DiffEqBase.ODE_DEFAULT_UNSTABLE_CHECK), verbose::Bool, timeseries_errors::Bool, dense_errors::Bool, advance_to_tstop::Bool, stop_at_next_tstop::Bool, initialize_save::Bool, progress::Bool, progress_steps::Int64, progress_name::String, progress_message::typeof(DiffEqBase.ODE_DEFAULT_PROG_MESSAGE), userdata::Nothing, allow_extrapolation::Bool, initialize_integrator::Bool, alias_u0::Bool, alias_du0::Bool, initializealg::OrdinaryDiffEq.DefaultInit,
kwargs::Base.Pairs{Symbol, Bool, Tuple{Symbol}, NamedTuple{(:save_noise,), Tuple{Bool}}})
@ OrdinaryDiffEq C:\Users\Luffy\.julia\packages\OrdinaryDiffEq\ZgJ9s\src\solve.jl:456
[13] #__solve#502
@ C:\Users\Luffy\.julia\packages\OrdinaryDiffEq\ZgJ9s\src\solve.jl:4 [inlined]
[14] #solve_call#28
@ C:\Users\Luffy\.julia\packages\DiffEqBase\hZncn\src\solve.jl:428 [inlined]
[15] solve_up(prob::ODEProblem{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, Tuple{Float32, Float32}, false, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, ODEFunction{false, DiffEqFlux.var"#dudt_#133"{NeuralODE{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, GraphNeuralNetworks.var"#re_withgraph#4"{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, Optimisers.Restructure{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, Tuple{NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}, NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}}}}, Tuple{Float32, Float32}, Tuple{Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}}, Base.Pairs{Symbol, Real, NTuple{4, Symbol}, NamedTuple{(:save_everystep, :reltol, :abstol, :save_start), Tuple{Bool, Float64, Float64, Bool}}}}}, LinearAlgebra.UniformScaling{Bool}, Nothing, typeof(DiffEqFlux.basic_tgrad), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}, SciMLBase.StandardODEProblem}, sensealg::Nothing, u0::CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, p::CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, args::Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}; kwargs::Base.Pairs{Symbol, Real, NTuple{6, Symbol}, NamedTuple{(:save_noise, :save_start, :save_end, :save_everystep, :reltol, :abstol), Tuple{Bool, Bool, Bool, Bool, Float64, Float64}}})
@ DiffEqBase C:\Users\Luffy\.julia\packages\DiffEqBase\hZncn\src\solve.jl:726
[16] #solve#29
@ C:\Users\Luffy\.julia\packages\DiffEqBase\hZncn\src\solve.jl:710 [inlined]
[17] _concrete_solve_adjoint(::ODEProblem{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, Tuple{Float32, Float32}, false, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, ODEFunction{false, DiffEqFlux.var"#dudt_#133"{NeuralODE{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, GraphNeuralNetworks.var"#re_withgraph#4"{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, Optimisers.Restructure{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, Tuple{NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}, NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}}}}, Tuple{Float32, Float32}, Tuple{Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}}, Base.Pairs{Symbol, Real, NTuple{4, Symbol}, NamedTuple{(:save_everystep, :reltol, :abstol, :save_start), Tuple{Bool, Float64, Float64, Bool}}}}}, LinearAlgebra.UniformScaling{Bool}, Nothing, typeof(DiffEqFlux.basic_tgrad), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}, SciMLBase.StandardODEProblem}, ::Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}, ::InterpolatingAdjoint{0, true, Val{:central}, ZygoteVJP, Bool}, ::CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, ::CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}; save_start::Bool, save_end::Bool, saveat::Vector{Float32}, save_idxs::Nothing, kwargs::Base.Pairs{Symbol, Real, Tuple{Symbol, Symbol, Symbol}, NamedTuple{(:save_everystep, :reltol, :abstol), Tuple{Bool, Float64, Float64}}})
@ DiffEqSensitivity C:\Users\Luffy\.julia\packages\DiffEqSensitivity\SjURy\src\concrete_solve.jl:131
[18] #_solve_adjoint#52
@ C:\Users\Luffy\.julia\packages\DiffEqBase\hZncn\src\solve.jl:1069 [inlined]
[19] #rrule#50
@ C:\Users\Luffy\.julia\packages\DiffEqBase\hZncn\src\solve.jl:1032 [inlined]
[20] chain_rrule_kw
@ C:\Users\Luffy\.julia\packages\Zygote\DkIUK\src\compiler\chainrules.jl:229 [inlined]
[21] macro expansion
@ C:\Users\Luffy\.julia\packages\Zygote\DkIUK\src\compiler\interface2.jl:0 [inlined]
[22] _pullback
@ C:\Users\Luffy\.julia\packages\Zygote\DkIUK\src\compiler\interface2.jl:9 [inlined]
[23] _apply
@ .\boot.jl:814 [inlined]
[24] adjoint
@ C:\Users\Luffy\.julia\packages\Zygote\DkIUK\src\lib\lib.jl:204 [inlined]
[25] _pullback
@ C:\Users\Luffy\.julia\packages\ZygoteRules\AIbCs\src\adjoint.jl:65 [inlined]
[26] _pullback
@ C:\Users\Luffy\.julia\packages\DiffEqBase\hZncn\src\solve.jl:710 [inlined]
[27] _pullback(::Zygote.Context, ::DiffEqBase.var"##solve#29", ::InterpolatingAdjoint{0, true, Val{:central}, ZygoteVJP, Bool}, ::Nothing, ::Nothing, ::Base.Pairs{Symbol, Real, NTuple{4, Symbol}, NamedTuple{(:save_everystep, :reltol, :abstol, :save_start), Tuple{Bool, Float64, Float64, Bool}}}, ::typeof(solve), ::ODEProblem{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, Tuple{Float32, Float32}, false, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, ODEFunction{false, DiffEqFlux.var"#dudt_#133"{NeuralODE{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, GraphNeuralNetworks.var"#re_withgraph#4"{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, Optimisers.Restructure{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, Tuple{NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}, NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}}}}, Tuple{Float32, Float32}, Tuple{Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}}, Base.Pairs{Symbol, Real, NTuple{4, Symbol}, NamedTuple{(:save_everystep, :reltol, :abstol, :save_start), Tuple{Bool, Float64, Float64, Bool}}}}}, LinearAlgebra.UniformScaling{Bool}, Nothing, typeof(DiffEqFlux.basic_tgrad), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}, SciMLBase.StandardODEProblem}, ::Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False})
@ Zygote C:\Users\Luffy\.julia\packages\Zygote\DkIUK\src\compiler\interface2.jl:0
[28] _apply(::Function, ::Vararg{Any})
@ Core .\boot.jl:814
[29] adjoint
@ C:\Users\Luffy\.julia\packages\Zygote\DkIUK\src\lib\lib.jl:204 [inlined]
[30] _pullback
@ C:\Users\Luffy\.julia\packages\ZygoteRules\AIbCs\src\adjoint.jl:65 [inlined]
[31] _pullback
@ C:\Users\Luffy\.julia\packages\DiffEqBase\hZncn\src\solve.jl:705 [inlined]
[32] _pullback(::Zygote.Context, ::CommonSolve.var"#solve##kw", ::NamedTuple{(:sensealg, :save_everystep, :reltol, :abstol, :save_start), Tuple{InterpolatingAdjoint{0, true, Val{:central}, ZygoteVJP, Bool}, Bool, Float64, Float64, Bool}}, ::typeof(solve), ::ODEProblem{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, Tuple{Float32, Float32}, false, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, ODEFunction{false, DiffEqFlux.var"#dudt_#133"{NeuralODE{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, GraphNeuralNetworks.var"#re_withgraph#4"{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, Optimisers.Restructure{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, Tuple{NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}, NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}}}}, Tuple{Float32, Float32}, Tuple{Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}}, Base.Pairs{Symbol, Real, NTuple{4, Symbol}, NamedTuple{(:save_everystep, :reltol, :abstol, :save_start), Tuple{Bool, Float64, Float64, Bool}}}}}, LinearAlgebra.UniformScaling{Bool}, Nothing, typeof(DiffEqFlux.basic_tgrad), Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}, SciMLBase.StandardODEProblem}, ::Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False})
@ Zygote C:\Users\Luffy\.julia\packages\Zygote\DkIUK\src\compiler\interface2.jl:0
[33] _apply(::Function, ::Vararg{Any})
@ Core .\boot.jl:814
[34] adjoint
@ C:\Users\Luffy\.julia\packages\Zygote\DkIUK\src\lib\lib.jl:204 [inlined]
[35] _pullback
@ C:\Users\Luffy\.julia\packages\ZygoteRules\AIbCs\src\adjoint.jl:65 [inlined]
[36] _pullback
@ C:\Users\Luffy\.julia\packages\DiffEqFlux\JicEk\src\neural_de.jl:77 [inlined]
[37] _pullback(::Zygote.Context, ::NeuralODE{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, GraphNeuralNetworks.var"#re_withgraph#4"{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, Optimisers.Restructure{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, Tuple{NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}, NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}}}}, Tuple{Float32, Float32}, Tuple{Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}}, Base.Pairs{Symbol, Real, NTuple{4, Symbol}, NamedTuple{(:save_everystep, :reltol, :abstol, :save_start), Tuple{Bool, Float64, Float64, Bool}}}}, ::CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, ::CuArray{Float32, 1, CUDA.Mem.DeviceBuffer})
@ Zygote C:\Users\Luffy\.julia\packages\Zygote\DkIUK\src\compiler\interface2.jl:0
[38] _pullback
@ C:\Users\Luffy\.julia\packages\DiffEqFlux\JicEk\src\neural_de.jl:73 [inlined]
[39] _pullback(ctx::Zygote.Context, f::NeuralODE{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, GraphNeuralNetworks.var"#re_withgraph#4"{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, Optimisers.Restructure{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, Tuple{NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}, NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}}}}, Tuple{Float32, Float32}, Tuple{Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}}, Base.Pairs{Symbol, Real, NTuple{4, Symbol}, NamedTuple{(:save_everystep, :reltol, :abstol, :save_start), Tuple{Bool, Float64, Float64, Bool}}}}, args::CuArray{Float32, 2, CUDA.Mem.DeviceBuffer})
@ Zygote C:\Users\Luffy\.julia\packages\Zygote\DkIUK\src\compiler\interface2.jl:0
[40] _pullback
@ C:\Users\Luffy\.julia\packages\GraphNeuralNetworks\4e2gU\src\layers\basic.jl:124 [inlined]
[41] _pullback(::Zygote.Context, ::typeof(GraphNeuralNetworks.applylayer), ::NeuralODE{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, GraphNeuralNetworks.var"#re_withgraph#4"{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, Optimisers.Restructure{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, Tuple{NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}, NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}}}}, Tuple{Float32, Float32}, Tuple{Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}}, Base.Pairs{Symbol, Real, NTuple{4, Symbol}, NamedTuple{(:save_everystep, :reltol, :abstol, :save_start), Tuple{Bool, Float64, Float64, Bool}}}}, ::GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}, ::CuArray{Float32, 2, CUDA.Mem.DeviceBuffer})
@ Zygote C:\Users\Luffy\.julia\packages\Zygote\DkIUK\src\compiler\interface2.jl:0
[42] _pullback
@ C:\Users\Luffy\.julia\packages\GraphNeuralNetworks\4e2gU\src\layers\basic.jl:137 [inlined]
--- the last 2 lines are repeated 1 more time ---
[45] _pullback(::Zygote.Context, ::typeof(GraphNeuralNetworks.applychain), ::Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, NeuralODE{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, GraphNeuralNetworks.var"#re_withgraph#4"{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, Optimisers.Restructure{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, Tuple{NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}, NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}}}}, Tuple{Float32, Float32}, Tuple{Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}}, Base.Pairs{Symbol, Real, NTuple{4, Symbol}, NamedTuple{(:save_everystep, :reltol, :abstol, :save_start), Tuple{Bool, Float64, Float64, Bool}}}}, typeof(diffeqsol_to_array), Dense{typeof(identity), CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}}, ::GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}, ::CuArray{Float32, 2, CUDA.Mem.DeviceBuffer})
@ Zygote C:\Users\Luffy\.julia\packages\Zygote\DkIUK\src\compiler\interface2.jl:0
[46] _pullback
@ C:\Users\Luffy\.julia\packages\GraphNeuralNetworks\4e2gU\src\layers\basic.jl:139 [inlined]
[47] _pullback(::Zygote.Context, ::GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, NeuralODE{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, GraphNeuralNetworks.var"#re_withgraph#4"{WithGraph{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}}, Optimisers.Restructure{GNNChain{Tuple{GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}, GCNConv{CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, typeof(relu)}}}, Tuple{NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}, NamedTuple{(:weight, :bias, :σ, :add_self_loops, :use_edge_weight), Tuple{Int64, Int64, Tuple{}, Tuple{}, Tuple{}}}}}}, Tuple{Float32, Float32}, Tuple{Tsit5{typeof(OrdinaryDiffEq.trivial_limiter!), typeof(OrdinaryDiffEq.trivial_limiter!), Static.False}}, Base.Pairs{Symbol, Real, NTuple{4, Symbol}, NamedTuple{(:save_everystep, :reltol, :abstol, :save_start), Tuple{Bool, Float64, Float64, Bool}}}}, typeof(diffeqsol_to_array), Dense{typeof(identity), CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}}}, ::GNNGraph{Tuple{CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}, Nothing}}, ::CuArray{Float32, 2, CUDA.Mem.DeviceBuffer})
@ Zygote C:\Users\Luffy\.julia\packages\Zygote\DkIUK\src\compiler\interface2.jl:0
[48] _pullback
@ .\REPL[37]:3 [inlined]
[49] _pullback(::Zygote.Context, ::var"#1#2")
@ Zygote C:\Users\Luffy\.julia\packages\Zygote\DkIUK\src\compiler\interface2.jl:0
[50] pullback(f::Function, ps::Params{Zygote.Buffer{Any, Vector{Any}}})
@ Zygote C:\Users\Luffy\.julia\packages\Zygote\DkIUK\src\compiler\interface.jl:352
[51] gradient(f::Function, args::Params{Zygote.Buffer{Any, Vector{Any}}})
@ Zygote C:\Users\Luffy\.julia\packages\Zygote\DkIUK\src\compiler\interface.jl:75
[52] top-level scope
@ REPL[37]:2

If it is working on yours, could you provide me with the loss & accuracy?
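For reference, the model I am running is essentially the example from this PR. The sketch below reconstructs it from the types in the stack trace; the graph, the feature sizes, and the definition of `diffeqsol_to_array` are placeholders and assumptions rather than the exact example code:

```julia
using Flux, GraphNeuralNetworks, DiffEqFlux, DifferentialEquations, CUDA

device = gpu                      # the same script runs fine with `device = cpu`
nin, nhidden, nout = 8, 16, 3     # placeholder sizes standing in for the real data

g = rand_graph(10, 40) |> device          # placeholder graph
X = randn(Float32, nin, 10) |> device     # placeholder node features

# Helper that turns the ODE solution back into a feature matrix
# (assumed definition, taken from the CPU version of the example).
diffeqsol_to_array(x) = Array(x)[:, :, 1]

node_chain = GNNChain(GCNConv(nhidden => nhidden, relu),
                      GCNConv(nhidden => nhidden, relu)) |> device

node = NeuralODE(WithGraph(node_chain, g), (0.0f0, 1.0f0), Tsit5(),
                 save_everystep = false, reltol = 1e-3, abstol = 1e-3,
                 save_start = false)

model = GNNChain(GCNConv(nin => nhidden, relu),
                 node,
                 diffeqsol_to_array,
                 Dense(nhidden, nout)) |> device

# The error above is thrown while taking gradients, when the parameters of the
# inner GNNChain are restructured on the GPU:
gs = gradient(() -> sum(model(g, X)), Flux.params(model))
```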