diff --git a/ext/NNlibCUDA/Project.toml b/ext/NNlibCUDA/Project.toml
index 125a8340d..e3f0fe67e 100644
--- a/ext/NNlibCUDA/Project.toml
+++ b/ext/NNlibCUDA/Project.toml
@@ -1,6 +1,6 @@
 name = "NNlibCUDA"
 uuid = "a00861dc-f156-4864-bf3c-e6376f28a68d"
-version = "0.2.4"
+version = "0.2.5"
 
 [deps]
 Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
diff --git a/ext/NNlibCUDA/src/cudnn/activations.jl b/ext/NNlibCUDA/src/cudnn/activations.jl
index e7fd5f3d8..6749a5b44 100644
--- a/ext/NNlibCUDA/src/cudnn/activations.jl
+++ b/ext/NNlibCUDA/src/cudnn/activations.jl
@@ -4,7 +4,8 @@ using Base.Broadcast
 
 using CUDA.CUDNN: cudnnActivationForward!, cudnnOpTensor!,
                   CUDNN_ACTIVATION_TANH,CUDNN_ACTIVATION_SIGMOID,CUDNN_ACTIVATION_ELU,
-                  CUDNN_ACTIVATION_RELU,CUDNN_ACTIVATION_CLIPPED_RELU,CUDNN_OP_TENSOR_MAX
+                  CUDNN_ACTIVATION_RELU,CUDNN_ACTIVATION_CLIPPED_RELU,CUDNN_OP_TENSOR_MAX,
+                  CUDNN_ACTIVATION_IDENTITY
 
 for (f, op) in [
     CUDA.tanh => (src,dst)->cudnnActivationForward!(dst, src, mode=CUDNN_ACTIVATION_TANH),
diff --git a/ext/NNlibCUDA/test/conv.jl b/ext/NNlibCUDA/test/conv.jl
index bfb6cd376..7e3f572d8 100644
--- a/ext/NNlibCUDA/test/conv.jl
+++ b/ext/NNlibCUDA/test/conv.jl
@@ -8,6 +8,12 @@ using NNlib: DenseConvDims
     @test ∇conv_data(c, b, cdims) ≈ collect(∇conv_data(dc, db, cdims))
     @test ∇conv_filter(a, c, cdims) ≈ collect(∇conv_filter(da, dc, cdims))
 
+    # Test Conv Bias Activation
+    bias = rand(Float64, 1, 1, 4, 1)
+    dbias = CuArray(bias)
+    @test conv_bias_act(a, b, cdims, bias, NNlib.relu) ≈ collect(conv_bias_act(da, db, cdims, dbias, NNlib.relu))
+    @test conv_bias_act(a, b, cdims, bias, identity) ≈ collect(conv_bias_act(da, db, cdims, dbias, identity))
+
     # Test for agreement between CPU NNlib and CuDNN versions, across a variety of kwargs
     options = Dict{Any, Any}.(( (), (:dilation => 2), (:flipkernel => true), (:stride => 2),
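
For context, a minimal sketch (not part of the diff) of the call pattern the new test lines exercise: the fused GPU conv_bias_act path is compared against the CPU NNlib result. The input and kernel shapes below are illustrative assumptions, since the diff does not show how a, b, da, and db are constructed in conv.jl; only the bias shape and the conv_bias_act calls are taken from the added lines.

using NNlib, CUDA
using NNlib: DenseConvDims, conv_bias_act

# Illustrative shapes (assumed, not taken from the test file):
a = rand(Float64, 8, 8, 3, 1)       # input  (width, height, channels_in, batch)
b = rand(Float64, 3, 3, 3, 4)       # kernel (kW, kH, channels_in, channels_out)
bias = rand(Float64, 1, 1, 4, 1)    # one bias per output channel, as in the diff
cdims = DenseConvDims(a, b)

da, db, dbias = CuArray(a), CuArray(b), CuArray(bias)

# Fused convolution + bias + activation; the GPU result is copied back to the
# host with collect and compared against the CPU reference.
y_cpu = conv_bias_act(a, b, cdims, bias, NNlib.relu)
y_gpu = conv_bias_act(da, db, cdims, dbias, NNlib.relu)
@assert y_cpu ≈ collect(y_gpu)

The identity case in the new tests matters because the cuDNN fused kernel needs CUDNN_ACTIVATION_IDENTITY, which is exactly the constant the activations.jl hunk adds to the import list.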