Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Julia_v0.7_update #4

Merged
merged 3 commits into from
Jul 28, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 0 additions & 2 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,7 @@
language: julia
os:
- linux
- osx
julia:
- 0.5
- 0.6
- nightly
matrix:
Expand Down
4 changes: 2 additions & 2 deletions REQUIRE
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
Compat 0.17.0
julia 0.5
Compat 1.0.0
julia 0.6
StatsBase
89 changes: 43 additions & 46 deletions src/CoupledFields.jl
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
module CoupledFields

using Compat
using StatsBase: zscore, sample
using Compat.LinearAlgebra
using Compat.Statistics
using StatsBase: zscore
export InputSpace, ModelObj
export KernelParameters, GaussianKP, PolynomialKP
export gradvecfield
Expand All @@ -15,29 +17,29 @@ include("MLKernels.jl")
All KernelParameters types contain certain parameters which are later passed to internal functions `Kf` and `∇Kf`. \n
A KernelParameters type is set using e.g. `PolynomialKP(X::Matrix{Float64})` or `GaussianKP(X::Matrix{Float64})`.
"""
abstract type KernelParameters end

"""
GaussianKP: For the gaussian kernel
"""
type GaussianKP <: KernelParameters
struct GaussianKP <: KernelParameters
xx::Matrix{Float64}
varx::Float64
end

"""
    GaussianKP(X::Matrix{Float64})

Build Gaussian kernel parameters from the data matrix `X`.
"""
function GaussianKP(X::Matrix{Float64})
    # Pairwise squared distances between the rows of X.
    xx = kernelmatrix(SquaredDistanceKernel(1.0), X, X)
    # Bandwidth scale: median of the strictly positive distances
    # (the zero self-distances on the diagonal are filtered out).
    varx = Statistics.median(xx[xx.>10.0^-9])
    return GaussianKP(xx, varx)
end


# Gaussian kernel matrix: exp(-xx / sx2) with sx2 = 2 * par[1]^2 * varx.
function Kf(par::Array{T}, X::Matrix{T}, kpars::GaussianKP) where T<:Float64
    sx2 = 2*par[1]*par[1]*kpars.varx
    return exp.(-kpars.xx/sx2)
end

function ∇Kf{T<:Float64}(par::Array{T}, X::Matrix{T}, kpars::GaussianKP)
function ∇Kf(par::Array{T}, X::Matrix{T}, kpars::GaussianKP) where T<:Float64
n,p = size(X)
sx2 = 2*par[1]*par[1]*kpars.varx
Gx = Kf(par, X, kpars)
Expand All @@ -49,7 +51,7 @@ end
"""
PolynomialKP: For the polynomial kernel
"""
type PolynomialKP <: KernelParameters
struct PolynomialKP <: KernelParameters
xx::Matrix{Float64}
function PolynomialKP(X::Matrix{Float64})
xx = kernelmatrix(LinearKernel(1.0, 1.0), X, X)
Expand All @@ -58,11 +60,11 @@ type PolynomialKP <: KernelParameters
end


# Polynomial kernel: the precomputed linear kernel matrix raised elementwise
# to the power par[1].
function Kf(par::Array{T}, X::Matrix{T}, kpars::PolynomialKP) where T<:Float64
    return (kpars.xx).^par[1]
end

function ∇Kf{T<:Float64}(par::Array{T}, X::Matrix{T}, kpars::PolynomialKP)
function ∇Kf(par::Array{T}, X::Matrix{T}, kpars::PolynomialKP) where T<:Float64
n,p = size(X)
m = par[1]
∇K = Float64[m*X[k,j]*kpars.xx[i,k]^(m-1.0) for i in 1:n, k in 1:n, j in 1:p ]
Expand All @@ -75,7 +77,7 @@ end
ModelObj: A type to hold statistical model results
Such as the matrices `W, R, A, T`, where `R=XW` and `T=YA`.
"""
type ModelObj
struct ModelObj
W::Matrix{Float64}
R::Matrix{Float64}
A::Matrix{Float64}
Expand All @@ -92,32 +94,31 @@ end
InputSpace(X, Y, d, lat): The fields are whitened if `d=[d1, d2]` is supplied.
Area weighting is applied if `lat` is supplied.
"""
struct InputSpace
    X::Matrix{Float64}
    Y::Matrix{Float64}
    # Plain standardisation: column-wise z-scores of both fields.
    function InputSpace(a::T, b::T) where T<:Matrix{Float64}
        new(zscore(a,1), zscore(b,1))
    end
    # Whitened fields, retaining leading components up to proportions d[1], d[2].
    function InputSpace(a::T, b::T, d::Vector{Float64}) where T<:Matrix{Float64}
        new(whiten(a, d[1]), whiten(b, d[2]))
    end
    # Whitening with latitude-based area weighting.
    # NOTE: the 0.7 conversion swapped the constraints (`V<:Matrix, T<:Vector`),
    # which makes `a::T, b::T` unmatchable for matrices; restored to the
    # pre-0.7 meaning: T is the matrix type, V the vector type.
    function InputSpace(a::T, b::T, d::V, lat::V) where {T<:Matrix{Float64}, V<:Vector{Float64}}
        new(whiten(a, d[1], lat=lat), whiten(b, d[2], lat=lat))
    end
end



"""
gradvecfield{N<:Float64, T<:Matrix{Float64}}(par::Array{N}, X::T, Y::T, kpars::KernelParameters ):
gradvecfield(par::Array{Float64}, X::T, Y::T, kpars::KernelParameters) where T<:Matrix{Float64}:
Compute the gradient vector or gradient matrix at each instance of the `X` and `Y` fields, by making use of a kernel feature space.
"""
function gradvecfield{N<:Float64, T<:Matrix{Float64}}(par::Array{N}, X::T, Y::T, kpars::KernelParameters )
function gradvecfield(par::Array{Float64}, X::T, Y::T, kpars::KernelParameters ) where T<:Matrix{Float64}
n,p = size(X)
Ix = (10.0^par[2])*n*eye(n)
Gx = Kf(par, X, kpars)
Gx = Kf(par, X, kpars) + (10.0^par[2])*n*I
∇K = ∇Kf(par, X, kpars)
return [∇K[i,:,:]' * ((Gx+Ix) \ Y) for i in 1:n]
return [∇K[i,:,:]' * (Gx \ Y) for i in 1:n]
end


Expand All @@ -128,34 +129,30 @@ Compute a piecewise linear basis matrix for the vector x.
"""
"""
    bf(x::Vector{Float64}, df::Int)

Compute a piecewise-linear basis matrix (n × df) for the vector `x`, with
breakpoints at the `df+1` evenly spaced quantiles of `x`.
"""
function bf(x::Vector{Float64}, df::Int)
    # With fewer than 2 segments the basis is just x itself (as an n×1 matrix).
    if df<2 return x[:,1:1] end
    # Breakpoints: quantiles at proportions 0, 1/df, …, 1.
    # Compat.range keeps the stop/length keyword form working on Julia 0.6.
    bp = Statistics.quantile(x, Compat.range(0, stop=1, length=df+1))
    n = length(x)
    a1 = repeat(x, inner=(1,df))
    a2 = repeat( bp[1:df]', inner=(n,1))

    # Clamp each column j of a1 to the segment [bp[j], bp[j+1]].
    for j in 1:df
        ix1 = x.<bp[j]; a1[ix1,j] .= bp[j]
        ix2 = x.>bp[j+1]; a1[ix2,j] .= bp[j+1]
    end

    # Shift each column by its left breakpoint.
    return a1-a2
end

"""
cca{T<:Matrix{Float64}}(v::Array{Float64}, X::T,Y::T):
cca(v::Array{Float64}, X::T,Y::T) where T<:Matrix{Float64}:
Regularized Canonical Correlation Analysis using SVD.
"""
function cca{T<:Matrix{Float64}}(v::Array{Float64}, X::T,Y::T)
n,p = size(X)
function cca(v::Array{Float64}, X::T,Y::T) where T<:Matrix{Float64}
# n,p = size(X)
q = size(Y)[2]
Ix = (10.0^v[1])*eye(p)
Iy = (10.0^v[2])*eye(q)

Cxx_fac_inv = (cov(X)+Ix)^-0.5
Cyy_fac_inv= if q>1 (cov(Y)+Iy)^-0.5
else cov(Y)^(-0.5)
end
M = cov(X*Cxx_fac_inv, Y*Cyy_fac_inv)
Cxx_fac_inv = (Statistics.cov(X)+(10.0^v[1])*I)^-0.5
Cyy_fac_inv = (q>1) ? (Statistics.cov(Y)+(10.0^v[2])*I)^-0.5 : Statistics.cov(Y)^(-0.5)
M = Statistics.cov(X*Cxx_fac_inv, Y*Cyy_fac_inv)
U, D, V = svd(M)

Wx = Cxx_fac_inv * U
Expand All @@ -168,7 +165,7 @@ function cca{T<:Matrix{Float64}}(v::Array{Float64}, X::T,Y::T)
end


# Kernel-signature convenience method: the kernel parameters are unused by the
# SVD-based CCA, so simply dispatch to the three-argument method.
cca(v::Array{Float64}, X::T,Y::T, kpars::KernelParameters) where {T<:Matrix{Float64}} = cca(v, X, Y)


"""
Expand All @@ -180,8 +177,8 @@ function gKCCA(par::Array{Float64}, X::Matrix{Float64}, Y::Matrix{Float64}, kpar
n, p = size(X)
∇g = gradvecfield(par, X, Y, kpars)
M = mapreduce(x -> x*x', +, ∇g)/n
ev = eigfact(Symmetric(M))
Wx = flipdim(ev.vectors, 2)
values, vectors = eig(Symmetric(M))
Wx = Compat.reverse(vectors, dims=2)
R = X * Wx

dmin = minimum([p q 3])
Expand All @@ -194,7 +191,7 @@ function gKCCA(par::Array{Float64}, X::Matrix{Float64}, Y::Matrix{Float64}, kpar
T[:,j] = cc.T[:,1]
end

return ModelObj(Wx, R, Ay, T, flipdim(ev.values,1), par, "gKCCA")
return ModelObj(Wx, R, Ay, T, reverse(values), par, "gKCCA")
end


Expand All @@ -210,19 +207,19 @@ function whiten(X::Matrix{Float64}, d::Float64; lat=nothing)
if lat != nothing
scale!(m1, 1.0./sqrt(cos(pi*lat/180)) )
end
sv = svdfact(m1)
l = cumsum(sv[:S].^2)/sum(abs2, sv[:S])
U = sv[:U][:, l.≤d]
U /= std(U[:,1])
U,S,V = svd(m1)
l = cumsum(S.^2)/sum(abs2, S)
U = U[:, l.≤d]
U /= Statistics.std(U[:,1])
return U
end


"""
CVfn{T<:Matrix{Float64}}(parm::T, X::T, Y::T, modelfn::Function, kerneltype::DataType; verbose::Bool=true, dcv::Int64=2)
CVfn(parm::T, X::T, Y::T, modelfn::Function, kerneltype::DataType; verbose::Bool=true, dcv::Int64=2) where T<:Matrix{Float64}
Cross-validation function
"""
function CVfn{T<:Matrix{Float64}}(parm::T, X::T, Y::T, modelfn::Function, kerneltype::DataType; verbose::Bool=true, dcv::Int64=2)
function CVfn(parm::T, X::T, Y::T, modelfn::Function, kerneltype::DataType; verbose::Bool=true, dcv::Int64=2) where T<:Matrix{Float64}

# free parameters
# df = par[3] # Number of segments in PWLM
Expand Down Expand Up @@ -277,10 +274,10 @@ function CVfn{T<:Matrix{Float64}}(parm::T, X::T, Y::T, modelfn::Function, kernel
end

"""
Rsq_adj{T<:Array{Float64}}(Tx::T, Ty::T, df::Int):
Rsq_adj(Tx::T, Ty::T, df::Int) where T<:Array{Float64}:
Cross-validation metric
"""
function Rsq_adj{T<:Array{Float64}}(Tx::T, Ty::T, df::Int)
function Rsq_adj(Tx::T, Ty::T, df::Int) where {T<:Array{Float64}}
y = vec(Ty)
n,p = size(Tx)
o = ones(n*p)
Expand Down
Loading