Skip to content
This repository has been archived by the owner on May 27, 2021. It is now read-only.

Commit

Permalink
Merge pull request #496 from JuliaGPU/tb/runtime_ccall
Browse files Browse the repository at this point in the history
Use CUDAapi.at-runtime_ccall for calling into potentially missing libraries
  • Loading branch information
maleadt authored Nov 12, 2019
2 parents 93e5a76 + dd8ee75 commit 9d2737f
Show file tree
Hide file tree
Showing 3 changed files with 62 additions and 76 deletions.
28 changes: 12 additions & 16 deletions Manifest.toml
Original file line number Diff line number Diff line change
Expand Up @@ -16,21 +16,17 @@ version = "0.2.0"

[[CUDAapi]]
deps = ["Libdl", "Logging"]
git-tree-sha1 = "e063efb91cfefd7e6afd92c435d01398107a500b"
git-tree-sha1 = "6eee47385c81ed3b3f716b745697869c712c2df3"
uuid = "3895d2a7-ec45-59b8-82bb-cfc6a382f9b3"
version = "1.2.0"
version = "2.0.0"

[[CUDAdrv]]
deps = ["CEnum", "Printf"]
git-tree-sha1 = "90fa52c4acb2fadf7be48b0d73d9865c16ab9908"
deps = ["CEnum", "CUDAapi", "Printf"]
git-tree-sha1 = "c41a9c87604c2afd2cf89c4ed4aa3bf86e7c6a82"
repo-rev = "master"
repo-url = "https://github.com/JuliaGPU/CUDAdrv.jl.git"
uuid = "c5f51814-7f29-56b8-a69c-e4d8f6be1fde"
version = "4.0.1"

[[Crayons]]
deps = ["Test"]
git-tree-sha1 = "f621b8ef51fd2004c7cf157ea47f027fdeac5523"
uuid = "a8cc5b0e-0ffa-5ad4-8c14-923d3ee1735f"
version = "4.0.0"
version = "4.0.3"

[[DataStructures]]
deps = ["InteractiveUtils", "OrderedCollections"]
Expand All @@ -48,9 +44,9 @@ uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"

[[LLVM]]
deps = ["CEnum", "Libdl", "Printf", "Unicode"]
git-tree-sha1 = "3680605a77f20bec59eea00389eb7aafe973abbb"
git-tree-sha1 = "74fe444b8b6d1ac01d639b2f9eaf395bcc2e24fc"
uuid = "929cbde3-209d-540e-8aea-75f648917ca0"
version = "1.3.1"
version = "1.3.2"

[[Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
Expand Down Expand Up @@ -91,10 +87,10 @@ deps = ["Distributed", "InteractiveUtils", "Logging", "Random"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[[TimerOutputs]]
deps = ["Crayons", "Printf", "Test", "Unicode"]
git-tree-sha1 = "b80671c06f8f8bae08c55d67b5ce292c5ae2660c"
deps = ["Printf"]
git-tree-sha1 = "d9c67bd7ac89aafa75037307331d050998bb5a96"
uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
version = "0.5.0"
version = "0.5.1"

[[Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
2 changes: 1 addition & 1 deletion Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
[compat]
Adapt = "0.4, 1.0"
CEnum = "0.2"
CUDAapi = "0.6, 1.0"
CUDAapi = "2.0"
CUDAdrv = "4.0.1"
DataStructures = "0.15, 0.16, 0.17"
LLVM = "1.2"
Expand Down
108 changes: 49 additions & 59 deletions src/CUDAnative.jl
Original file line number Diff line number Diff line change
Expand Up @@ -55,86 +55,76 @@ functional() = __initialized__[]

function __init__()
try
# barrier to avoid compiling `ccall`s to unavailable libraries
inferencebarrier(__hidden_init__)()
__initialized__[] = true
catch ex
# don't actually fail to keep the package loadable
@debug("CUDAnative.jl failed to initialize; the package will not be functional.",
exception=(ex, catch_backtrace()))
end
end
## target support

if VERSION >= v"1.3.0-DEV.35"
using Base: inferencebarrier
else
inferencebarrier(@nospecialize(x)) = Ref{Any}(x)[]
end
# LLVM.jl

function __hidden_init__()
CUDAdrv.functional() || error("CUDAdrv.jl is not functional")
llvm_version = LLVM.version()
llvm_targets, llvm_isas = llvm_support(llvm_version)


## target support
# Julia

# LLVM.jl
julia_llvm_version = Base.libllvm_version
if julia_llvm_version != llvm_version
error("LLVM $llvm_version incompatible with Julia's LLVM $julia_llvm_version")
end

llvm_version = LLVM.version()
llvm_targets, llvm_isas = llvm_support(llvm_version)

# CUDA

# Julia
toolkit_dirs = find_toolkit()
cuda_toolkit_version = find_toolkit_version(toolkit_dirs)
if cuda_toolkit_version <= v"9"
@warn "CUDAnative.jl only supports CUDA 9.0 or higher (your toolkit provides CUDA $(version()))"
end

julia_llvm_version = Base.libllvm_version
if julia_llvm_version != llvm_version
error("LLVM $llvm_version incompatible with Julia's LLVM $julia_llvm_version")
end
cuda_targets, cuda_isas = cuda_support(CUDAdrv.version(), cuda_toolkit_version)

target_support[] = sort(collect(llvm_targets ∪ cuda_targets))
isempty(target_support[]) && error("Your toolchain does not support any device target")

# CUDA
ptx_support[] = sort(collect(llvm_isas ∪ cuda_isas))
isempty(ptx_support[]) && error("Your toolchain does not support any PTX ISA")

toolkit_dirs = find_toolkit()
cuda_toolkit_version = find_toolkit_version(toolkit_dirs)
if cuda_toolkit_version <= v"9"
@warn "CUDAnative.jl only supports CUDA 9.0 or higher (your toolkit provides CUDA $(version()))"
end
@debug("CUDAnative supports devices $(verlist(target_support[])); PTX $(verlist(ptx_support[]))")

cuda_targets, cuda_isas = cuda_support(CUDAdrv.version(), cuda_toolkit_version)
let val = find_libdevice(target_support[], toolkit_dirs)
val === nothing && error("Your CUDA installation does not provide libdevice")
libdevice[] = val
end

target_support[] = sort(collect(llvm_targets ∪ cuda_targets))
isempty(target_support[]) && error("Your toolchain does not support any device target")
let val = find_libcudadevrt(toolkit_dirs)
val === nothing && error("Your CUDA installation does not provide libcudadevrt")
libcudadevrt[] = val
end

ptx_support[] = sort(collect(llvm_isas ∪ cuda_isas))
isempty(ptx_support[]) && error("Your toolchain does not support any PTX ISA")
let val = find_cuda_binary("nvdisasm", toolkit_dirs)
val === nothing && error("Your CUDA installation does not provide the nvdisasm binary")
nvdisasm[] = val
end

@debug("CUDAnative supports devices $(verlist(target_support[])); PTX $(verlist(ptx_support[]))")
let val = find_cuda_binary("ptxas", toolkit_dirs)
val === nothing && error("Your CUDA installation does not provide the ptxas binary")
ptxas[] = val
end

let val = find_libdevice(target_support[], toolkit_dirs)
val === nothing && error("Your CUDA installation does not provide libdevice")
libdevice[] = val
end

let val = find_libcudadevrt(toolkit_dirs)
val === nothing && error("Your CUDA installation does not provide libcudadevrt")
libcudadevrt[] = val
end
## actual initialization

let val = find_cuda_binary("nvdisasm", toolkit_dirs)
val === nothing && error("Your CUDA installation does not provide the nvdisasm binary")
nvdisasm[] = val
end

let val = find_cuda_binary("ptxas", toolkit_dirs)
val === nothing && error("Your CUDA installation does not provide the ptxas binary")
ptxas[] = val
end


## actual initialization
__init_compiler__()

__init_compiler__()
CUDAdrv.apicall_hook[] = maybe_initialize

CUDAdrv.apicall_hook[] = maybe_initialize
__initialized__[] = true
catch ex
# don't actually fail to keep the package loadable
@debug begin
@error("Error thrown during package initialization",
exception=(ex, catch_backtrace()))
"CUDAnative.jl failed to initialize; the package will not be functional."
end
end
end

verlist(vers) = join(map(ver->"$(ver.major).$(ver.minor)", sort(collect(vers))), ", ", " and ")
Expand Down

0 comments on commit 9d2737f

Please sign in to comment.