diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 663758dab..acdc2f349 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -15,3 +15,6 @@ include:
 
 test:dev:
   allow_failure: true
+
+coverage:
+  allow_failure: true
diff --git a/.travis.yml b/.travis.yml
index fb981bf4f..06424bd4f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -8,6 +8,10 @@ julia:
   - 1.0
   - nightly
 
+matrix:
+  allow_failures:
+  - julia: nightly
+
 notifications:
   email: false
 
@@ -36,9 +40,5 @@ jobs:
 #    - if [[ -a .git/shallow ]]; then git fetch --unshallow; fi
 #    - julia --check-bounds=yes -e 'Pkg.clone(pwd()); Pkg.build("Knet"); Pkg.test("Knet"; coverage=true)'
 
-# matrix:
-  allow_failures:
-  - julia: nightly ## getting too many warnings
-
 ## https://docs.travis-ci.com/user/languages/julia
 ## If you leave the julia: key out of your .travis.yml, Travis CI will use the most recent release.
diff --git a/src/reduction.jl b/src/reduction.jl
index e4e3c1899..5521fef6e 100644
--- a/src/reduction.jl
+++ b/src/reduction.jl
@@ -5,27 +5,29 @@ import Base: sum, prod, minimum, maximum # , countnz
 import LinearAlgebra: norm, lmul!
 import Statistics: mean
-sum(::typeof(abs), x::KnetArray; dims=:) = sumabs(x,dims=dims);
-sum(::typeof(abs2), x::KnetArray; dims=:) = sumabs2(x,dims=dims);
-maximum(::typeof(abs), x::KnetArray; dims=:) = maxabs(x,dims=dims);
-minimum(::typeof(abs), x::KnetArray; dims=:) = minabs(x,dims=dims);
+sum(::typeof(abs), x::KnetArray; dims=:) = sumabs(x,dims=dims)
+sum(::typeof(abs2), x::KnetArray; dims=:) = sumabs2(x,dims=dims)
+sum(::typeof(!iszero), x::KnetArray; dims=:) = countnz(x,dims=dims)
+maximum(::typeof(abs), x::KnetArray; dims=:) = maxabs(x,dims=dims)
+minimum(::typeof(abs), x::KnetArray; dims=:) = minabs(x,dims=dims)
 
 sumabs(x;dims=:)=sum(abs,x;dims=dims)
 sumabs2(x;dims=:)=sum(abs2,x;dims=dims)
 maxabs(x;dims=:)=maximum(abs,x;dims=dims)
 minabs(x;dims=:)=minimum(abs,x;dims=dims)
+countnz(x;dims=:)=sum(!iszero,x;dims=dims)
 
 reduced_dims_compat(dims,region)=map(last, Base.reduced_indices(map(Base.OneTo, dims), region))
 
 function reduction_op(f, j=f, o...)
     J=Symbol(j)
-    if isdefined(Base, J); eval(Expr(:import,:Base,J)); end
+    M = which(@__MODULE__, J)
     for S in (32,64)
         T = Symbol("Float$S")
         F20 = "$(f)_$(S)_20"
         F21 = "$(f)_$(S)_21"
         F22 = "$(f)_$(S)_22"
         @eval begin
-            function $J(x::KnetArray{$T}; dims=:)
+            function ($M).$J(x::KnetArray{$T}; dims=:)
                 if dims == Colon()
                     y=@knet8r($F20,$T,(Cint,Ptr{$T}),length(x),x)
                     return y
diff --git a/src/rnn.jl b/src/rnn.jl
index adbcd1f82..43d416db1 100644
--- a/src/rnn.jl
+++ b/src/rnn.jl
@@ -731,8 +731,8 @@ function rnntest(r::RNN, ws, x, hx=nothing, cx=nothing;
     #@assert (r.inputMode == 0 || H == X)
     L = Int(r.numLayers) * (r.direction == 1 ? 2 : 1)
     hsize = (H, B, L)
-    @assert hx == nothing || size(hx) == hsize
-    @assert cx == nothing || size(cx) == hsize
+    @assert hx == nothing || eqsize(size(hx), hsize)
+    @assert cx == nothing || eqsize(size(cx), hsize)
     h = hx==nothing ? fill!(similar(value(x),hsize),0) : hx
     # hs = Array{Any}[ h[:,:,l] for l=1:L ]
     hs = Array{Any}(undef,L)
@@ -887,6 +887,16 @@ function rnntest(r::RNN, ws, x, hx=nothing, cx=nothing;
     return (y,hyout,cyout,nothing)
 end
 
 
+# compare sizes ignoring trailing ones
+function eqsize(a, b)
+    na = length(a)
+    nb = length(b)
+    (na == nb ? a == b : na > nb ?
+     a[1:nb] == b && all(a[nb+1:end] .== 1) :
+     b[1:na] == a && all(b[na+1:end] .== 1))
+end
+
+
 # TODO: WIP
 function rnntest_bs(batchSizes, r::RNN, w, x, hx=nothing, cx=nothing;
diff --git a/src/serialize.jl b/src/serialize.jl
index 8c860b082..80c81bde6 100644
--- a/src/serialize.jl
+++ b/src/serialize.jl
@@ -3,11 +3,7 @@ const GPUMODE=Val(1)
 const CPUMODE=Val(2)
 
 serialize(x) = _ser(x,IdDict(),JLDMODE)
-
-"Return a copy of `x` with all its arrays transferred to GPU."
 gpucopy(x) = _ser(x,IdDict(),GPUMODE)
-
-"Return a copy of `x` with all its arrays transferred to CPU."
 cpucopy(x) = _ser(x,IdDict(),CPUMODE)
 
 function _ser(x::KnetPtr,s::IdDict,::typeof(JLDMODE))
@@ -50,25 +46,31 @@ function _ser(x::RNN, s::IdDict, m::typeof(JLDMODE))
     end
     return s[x]
 end
-
 # Partially fixes the issue: when KA converts to A because no gpu, surrounding parametric types remain Param{KA}.
 # However other container types that include KnetArray may still have an inconsistent parametric type problem.
 _ser(x::Param, s::IdDict, m::Val)=(haskey(s,x) ? s[x] : s[x]=Param(_ser(x.value,s,m),_ser(x.opt,s,m)))
 
 _ser(x::KnetArray,s::IdDict,::typeof(GPUMODE))=x
 _ser(x::KnetArray,s::IdDict,::typeof(CPUMODE))=(haskey(s,x) ? s[x] : s[x]=Array(x))
-_ser(x::Array,s::IdDict,::typeof(GPUMODE))=(haskey(s,x) ? s[x] : s[x]=KnetArray(x))
-_ser(x::Array,s::IdDict,::typeof(CPUMODE))=x
+_ser(x::Array, s::IdDict, m::Val) = (haskey(s, x) ? s[x] : s[x] = _ser_array_t(x, eltype(x), s, m))
+
+function _ser_array_t(@nospecialize(x), T, s::IdDict, m::Val)
+    if !isbitstype(T)
+        map(xi->_ser(xi,s,m), x)
+    elseif m === GPUMODE
+        KnetArray(x)
+    else
+        x
+    end
+end
 
 # Generic serialization rules from deepcopy.jl
-_ser(x::Union{Symbol,Core.MethodInstance,Method,GlobalRef,DataType,Union,Task},::IdDict,::Val) = x
+_ser(x::Union{Symbol,Core.MethodInstance,Method,GlobalRef,DataType,Union,UnionAll,Task},::IdDict,::Val) = x
 _ser(x::Tuple, s::IdDict, m::Val) = ntuple(i->_ser(x[i], s, m), length(x))
 _ser(x::Module, ::IdDict, ::Val) = error("serialize of Modules not supported")
 _ser(x::Core.SimpleVector, s::IdDict,m::Val) = (haskey(s, x) ? s[x] : s[x] = Core.svec(Any[_ser(x[i], s, m) for i = 1:length(x)]...))
 _ser(x::String, s::IdDict,::Val) = (haskey(s, x) ? s[x] : s[x] = (GC.@preserve x unsafe_string(pointer(x), sizeof(x))))
-_ser(x::Array, s::IdDict, m::Val) = (haskey(s, x) ? s[x] : s[x] = _ser_array_t(x, eltype(x), s, m))
-_ser_array_t(@nospecialize(x), T, s::IdDict, m::Val) = (isbitstype(T) ? x : map(xi->_ser(xi,s,m), x))
 
 function _ser(@nospecialize(x), s::IdDict, m::Val)
     T = typeof(x)::DataType
diff --git a/test/serialize.jl b/test/serialize.jl
index af8cd84bb..1881d97e8 100644
--- a/test/serialize.jl
+++ b/test/serialize.jl
@@ -1,4 +1,5 @@
 include("header.jl")
+import Knet: serialize
 
 struct M370; layer; end;
 @testset "serialize" begin
@@ -6,11 +7,15 @@ struct M370; layer; end;
     M2 = M1 |> cpucopy
     @test typeof(M2.w.value) <: Array
     @test M2.w.value == M1.w.value
+    @test Array{Float32} == serialize(Array{Float32})
     if gpu() >= 0
         M3 = M2 |> gpucopy
         @test typeof(M3.w.value) <: KnetArray
         @test M3.w.value == M2.w.value
-
+        array_of_ka = map(value,params(M3))
+        array_of_ca = array_of_ka |> cpucopy
+        @test first(array_of_ca) isa Array
+
         # 370-1
         m = M370(param(5,5,1,1))
         mcpu = m |> cpucopy
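
Note on the src/rnn.jl change: eqsize compares two size tuples while treating missing trailing dimensions as 1, so a 2-D hidden state of size (H, B) still passes the assertion against the expected (H, B, 1). Below is a standalone restatement of the same logic plus two illustrative calls; it is plain Julia, needs no Knet, and the example tuples are made up for illustration only.

    # Compare sizes, ignoring trailing singleton dimensions.
    function eqsize(a, b)
        na = length(a)
        nb = length(b)
        (na == nb ? a == b : na > nb ?
         a[1:nb] == b && all(a[nb+1:end] .== 1) :
         b[1:na] == a && all(b[na+1:end] .== 1))
    end

    eqsize((5, 4, 1), (5, 4))   # true:  the trailing dimension is 1
    eqsize((5, 4, 2), (5, 4))   # false: the extra dimension is not 1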
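Note on the src/serialize.jl change: _ser now routes every Array through _ser_array_t, which recurses into arrays whose element type is not isbits (for example a Vector{Any} holding KnetArrays), so cpucopy and gpucopy convert the nested arrays instead of returning the container untouched. A minimal usage sketch, assuming Knet is installed and a GPU is available; it mirrors the new test in test/serialize.jl, and the variable names are illustrative.

    using Knet

    if gpu() >= 0
        xs = Any[KnetArray(rand(Float32, 3, 3)), KnetArray(rand(Float32, 2))]
        ys = cpucopy(xs)               # eltype(xs) == Any is not isbits, so each element is serialized
        @assert first(ys) isa Array    # nested KnetArrays became regular Arrays
        zs = gpucopy(ys)               # the reverse direction moves the elements back to the GPU
        @assert first(zs) isa KnetArray
    end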
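Note on the src/reduction.jl change: sum(!iszero, x) on a KnetArray now dispatches to countnz, and a generic countnz(x; dims=:) = sum(!iszero, x; dims=dims) fallback is defined for other array types. A hedged usage sketch, assuming Knet is installed, a GPU is available, and reduction_op generates the KnetArray countnz methods elsewhere in reduction.jl (not shown in this hunk).

    using Knet

    if gpu() >= 0
        x = KnetArray(Float32[0 1 0; 2 0 3])
        sum(!iszero, x)            # whole-array nonzero count via the KnetArray countnz path
        sum(!iszero, x, dims=1)    # per-column nonzero counts
    end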