From 5961872637b81322579b4535c893ec57addd8662 Mon Sep 17 00:00:00 2001 From: galipremsagar Date: Wed, 25 Mar 2020 10:44:56 -0700 Subject: [PATCH 01/21] add GPU support, runtime & driver checks --- python/cudf/cudf/__init__.py | 51 +++++++++++++++++++++++++++++++++++- python/cudf/cudf/errors.py | 9 +++++++ 2 files changed, 59 insertions(+), 1 deletion(-) create mode 100644 python/cudf/cudf/errors.py diff --git a/python/cudf/cudf/__init__.py b/python/cudf/cudf/__init__.py index 4ed51db9c5f..07e08e15896 100644 --- a/python/cudf/cudf/__init__.py +++ b/python/cudf/cudf/__init__.py @@ -1,9 +1,58 @@ # Copyright (c) 2018-2019, NVIDIA CORPORATION. +""" __init__.py + isort:skip_file +""" import cupy -import rmm +from cudf.errors import UnSupportedGPUError, UnSupportedCUDAError + +gpus_count = cupy.cuda.runtime.getDeviceCount() + + +if gpus_count > 0: + for device in range(0, gpus_count): + # cudaDevAttrComputeCapabilityMajor - 75 + major_version = cupy.cuda.runtime.deviceGetAttribute(75, device) + + if major_version >= 6: + # You have a GPU with NVIDIA Pascal™ architecture or better + pass + else: + raise UnSupportedGPUError( + "You will need a GPU with NVIDIA Pascal™ architecture or better" + ) + + cuda_runtime_version = cupy.cuda.runtime.runtimeGetVersion() + if cuda_runtime_version > 10000: + # CUDA Runtime Version Check: Runtime version is greater than 10000 + pass + else: + raise UnSupportedCUDAError( + "Please update your CUDA Runtime to 10.0 or above" + ) + + cuda_driver_version = cupy.cuda.runtime.driverGetVersion() + + if cuda_driver_version == 0: + raise UnSupportedCUDAError("Please install CUDA Driver") + elif cuda_driver_version >= cuda_runtime_version: + # CUDA Driver Version Check: Driver Runtime version is >= Runtime version + pass + else: + raise UnSupportedCUDAError( + "Please update your CUDA Driver to 10.0 or above" + ) + +else: + import warnings + + warnings.warn( + "You donot have an NVIDIA GPU, please install one and try again" + ) + +import rmm from 
cudf import core, datasets from cudf._version import get_versions from cudf.core import DataFrame, Index, MultiIndex, Series, from_pandas, merge diff --git a/python/cudf/cudf/errors.py b/python/cudf/cudf/errors.py new file mode 100644 index 00000000000..8a31afab9cf --- /dev/null +++ b/python/cudf/cudf/errors.py @@ -0,0 +1,9 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. + + +class UnSupportedGPUError(Exception): + pass + + +class UnSupportedCUDAError(Exception): + pass From e92c6b57408d466d9f29679d45b1bea887a65d88 Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Wed, 25 Mar 2020 13:36:00 -0500 Subject: [PATCH 02/21] Update python/cudf/cudf/__init__.py Co-Authored-By: Jake Hemstad --- python/cudf/cudf/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/cudf/cudf/__init__.py b/python/cudf/cudf/__init__.py index 07e08e15896..eae466efd9c 100644 --- a/python/cudf/cudf/__init__.py +++ b/python/cudf/cudf/__init__.py @@ -42,7 +42,7 @@ pass else: raise UnSupportedCUDAError( - "Please update your CUDA Driver to 10.0 or above" + "Please update your NVIDIA GPU Driver version to >=410.48" ) else: From 9617fb93a1b38bf5ce08f0d3d00f889621be4191 Mon Sep 17 00:00:00 2001 From: galipremsagar Date: Wed, 25 Mar 2020 13:02:31 -0700 Subject: [PATCH 03/21] change error message to provide driver versions and url to compatibility guide --- python/cudf/cudf/__init__.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/python/cudf/cudf/__init__.py b/python/cudf/cudf/__init__.py index eae466efd9c..e0cdaa379b4 100644 --- a/python/cudf/cudf/__init__.py +++ b/python/cudf/cudf/__init__.py @@ -42,7 +42,11 @@ pass else: raise UnSupportedCUDAError( - "Please update your NVIDIA GPU Driver version to >=410.48" + "Please update your NVIDIA GPU Driver to support CUDA Runtime.\n" + "Detected CUDA Runtime version : " + + str(cuda_runtime_version) + + "\n" + "Detected NVIDIA GPU Driver version : " + str(cuda_driver_version) ) else: From 
070953239a72a669d9f53f48efd85960ac70c381 Mon Sep 17 00:00:00 2001 From: galipremsagar Date: Wed, 25 Mar 2020 13:10:29 -0700 Subject: [PATCH 04/21] modify error text --- python/cudf/cudf/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/cudf/cudf/__init__.py b/python/cudf/cudf/__init__.py index e0cdaa379b4..985ef3a8c07 100644 --- a/python/cudf/cudf/__init__.py +++ b/python/cudf/cudf/__init__.py @@ -46,7 +46,8 @@ "Detected CUDA Runtime version : " + str(cuda_runtime_version) + "\n" - "Detected NVIDIA GPU Driver version : " + str(cuda_driver_version) + "Latest version of CUDA supported by current NVIDIA GPU Driver : " + + str(cuda_driver_version) ) else: From a61ab8e24b78eb9c6f38d73e7d7100fd59bfc7ea Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Wed, 25 Mar 2020 15:16:06 -0500 Subject: [PATCH 05/21] Update python/cudf/cudf/__init__.py Co-Authored-By: Jake Hemstad --- python/cudf/cudf/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/cudf/cudf/__init__.py b/python/cudf/cudf/__init__.py index 985ef3a8c07..74acb214872 100644 --- a/python/cudf/cudf/__init__.py +++ b/python/cudf/cudf/__init__.py @@ -42,7 +42,7 @@ pass else: raise UnSupportedCUDAError( - "Please update your NVIDIA GPU Driver to support CUDA Runtime.\n" + "The detected driver version does not support the detected CUDA Runtime version. 
Please update your NVIDIA GPU Driver.\n" "Detected CUDA Runtime version : " + str(cuda_runtime_version) + "\n" From d1f1717faf6de8162748e8d5615ad2e17f5b9630 Mon Sep 17 00:00:00 2001 From: galipremsagar Date: Thu, 26 Mar 2020 13:46:04 -0700 Subject: [PATCH 06/21] add cpp apis and cython/python bridge --- cpp/CMakeLists.txt | 1 + cpp/include/cudf/utilities/device.hpp | 60 ++++++++++++++++++ cpp/src/utilities/device.cu | 87 +++++++++++++++++++++++++++ python/cudf/cudf/__init__.py | 54 +---------------- python/cudf/cudf/utils/gpu.pxd | 9 +++ python/cudf/cudf/utils/gpu.pyx | 29 +++++++++ python/cudf/cudf/utils/gpu_utils.py | 74 +++++++++++++++++++++++ 7 files changed, 263 insertions(+), 51 deletions(-) create mode 100644 cpp/include/cudf/utilities/device.hpp create mode 100644 cpp/src/utilities/device.cu create mode 100644 python/cudf/cudf/utils/gpu.pxd create mode 100644 python/cudf/cudf/utils/gpu.pyx create mode 100644 python/cudf/cudf/utils/gpu_utils.py diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 96d512f9975..09ee75b8f35 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -546,6 +546,7 @@ add_library(cudf src/utilities/legacy/error_utils.cpp src/utilities/nvtx/nvtx_utils.cpp src/utilities/nvtx/legacy/nvtx_utils.cpp + src/utilities/device.cu src/copying/copy.cpp src/copying/scatter.cu src/copying/shift.cu diff --git a/cpp/include/cudf/utilities/device.hpp b/cpp/include/cudf/utilities/device.hpp new file mode 100644 index 00000000000..7b99d1ff81a --- /dev/null +++ b/cpp/include/cudf/utilities/device.hpp @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + + +namespace cudf { +namespace experimental { + +/** + * @brief Returns the version number of the current CUDA Runtime instance. + * The version is returned as (1000 major + 10 minor). For example, + * CUDA 9.2 would be represented by 9020. + * + * This function returns -1 if runtime version is NULL. + * + * @return Integer containing the version of current CUDA Runtime. + */ +int get_cuda_runtime_version(); + + +/** + * @brief Returns the number of devices with compute capability greater or + * equal to 2.0 that are available for execution. + * + * This function returns -1 if NULL device pointer is assigned. + * + * @return Integer containing the number of compute-capable devices. + */ +int get_gpu_device_count(); + + +/** + * @brief Returns in the latest version of CUDA supported by the driver. + * The version is returned as (1000 major + 10 minor). For example, + * CUDA 9.2 would be represented by 9020. If no driver is installed, + * then 0 is returned as the driver version. + * + * This function returns -1 if driver version is NULL. + * + * @return Integer containing the latest version of CUDA supported by the driver. + */ + +int get_cuda_latest_supported_driver_version(); + +} // namespace experimental +} // namespace cudf diff --git a/cpp/src/utilities/device.cu b/cpp/src/utilities/device.cu new file mode 100644 index 00000000000..66dfe3ed996 --- /dev/null +++ b/cpp/src/utilities/device.cu @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +namespace cudf { +namespace experimental { + +/** + * @brief Returns the version number of the current CUDA Runtime instance. + * The version is returned as (1000 major + 10 minor). For example, + * CUDA 9.2 would be represented by 9020. + * + * This function returns -1 if runtime version is NULL. + * + * @return Integer containing the version of current CUDA Runtime. + */ +int get_cuda_runtime_version() { + int runtimeVersion; + cudaError_t status; + status = cudaRuntimeGetVersion(&runtimeVersion); + if (status != cudaSuccess) { + // If there is no GPU / any issues with the run time + // like driver initialization or Insufficient driver. + return -1; + } + return runtimeVersion; +} + +/** + * @brief Returns the number of devices with compute capability greater or + * equal to 2.0 that are available for execution. + * + * This function returns -1 if NULL device pointer is assigned. + * + * @return Integer containing the number of compute-capable devices. + */ +int get_gpu_device_count() { + int deviceCount; + cudaError_t status; + status = cudaGetDeviceCount(&deviceCount); + if (status != cudaSuccess) { + // If there is no GPU / any issues with the run time + // like driver initialization or Insufficient driver. + return -1; + } + return deviceCount; +} + +/** + * @brief Returns in the latest version of CUDA supported by the driver. 
+ * The version is returned as (1000 major + 10 minor). For example, + * CUDA 9.2 would be represented by 9020. If no driver is installed, + * then 0 is returned as the driver version. + * + * This function returns -1 if driver version is NULL. + * + * @return Integer containing the latest version of CUDA supported by the driver. + */ +int get_cuda_latest_supported_driver_version() { + int driverVersion; + cudaError_t status; + status = cudaDriverGetVersion(&driverVersion); + if (status != cudaSuccess) { + // If there is no GPU / any issues with the run time + // like driver initialization or Insufficient driver. + return -1; + } + return driverVersion; +} + +} // namespace experimental +} // namespace cudf diff --git a/python/cudf/cudf/__init__.py b/python/cudf/cudf/__init__.py index 985ef3a8c07..c73c657cba8 100644 --- a/python/cudf/cudf/__init__.py +++ b/python/cudf/cudf/__init__.py @@ -3,60 +3,12 @@ isort:skip_file """ -import cupy - -from cudf.errors import UnSupportedGPUError, UnSupportedCUDAError - -gpus_count = cupy.cuda.runtime.getDeviceCount() - - -if gpus_count > 0: - for device in range(0, gpus_count): - # cudaDevAttrComputeCapabilityMajor - 75 - major_version = cupy.cuda.runtime.deviceGetAttribute(75, device) - - if major_version >= 6: - # You have a GPU with NVIDIA Pascal™ architecture or better - pass - else: - raise UnSupportedGPUError( - "You will need a GPU with NVIDIA Pascal™ architecture or better" - ) - cuda_runtime_version = cupy.cuda.runtime.runtimeGetVersion() +from cudf.utils.gpu_utils import validate_setup - if cuda_runtime_version > 10000: - # CUDA Runtime Version Check: Runtime version is greater than 10000 - pass - else: - raise UnSupportedCUDAError( - "Please update your CUDA Runtime to 10.0 or above" - ) - - cuda_driver_version = cupy.cuda.runtime.driverGetVersion() - - if cuda_driver_version == 0: - raise UnSupportedCUDAError("Please install CUDA Driver") - elif cuda_driver_version >= cuda_runtime_version: - # CUDA Driver Version 
Check: Driver Runtime version is >= Runtime version - pass - else: - raise UnSupportedCUDAError( - "Please update your NVIDIA GPU Driver to support CUDA Runtime.\n" - "Detected CUDA Runtime version : " - + str(cuda_runtime_version) - + "\n" - "Latest version of CUDA supported by current NVIDIA GPU Driver : " - + str(cuda_driver_version) - ) - -else: - import warnings - - warnings.warn( - "You donot have an NVIDIA GPU, please install one and try again" - ) +validate_setup() +import cupy import rmm from cudf import core, datasets from cudf._version import get_versions diff --git a/python/cudf/cudf/utils/gpu.pxd b/python/cudf/cudf/utils/gpu.pxd new file mode 100644 index 00000000000..978b8efa182 --- /dev/null +++ b/python/cudf/cudf/utils/gpu.pxd @@ -0,0 +1,9 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. + + +cdef extern from "cudf/utilities/device.hpp" namespace \ + "cudf::experimental" nogil: + + cdef int get_cuda_runtime_version() except + + cdef int get_gpu_device_count() except + + cdef int get_cuda_latest_supported_driver_version() except + diff --git a/python/cudf/cudf/utils/gpu.pyx b/python/cudf/cudf/utils/gpu.pyx new file mode 100644 index 00000000000..e991cca0750 --- /dev/null +++ b/python/cudf/cudf/utils/gpu.pyx @@ -0,0 +1,29 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. 
+ +from cudf.utils.gpu cimport ( + get_cuda_runtime_version as cpp_get_cuda_runtime_version, + get_gpu_device_count as cpp_get_gpu_device_count, + get_cuda_latest_supported_driver_version as + cpp_get_cuda_latest_supported_driver_version +) + + +def get_cuda_runtime_version(): + cdef int c_result + with nogil: + c_result = cpp_get_cuda_runtime_version() + return c_result + + +def get_gpu_device_count(): + cdef int c_result + with nogil: + c_result = cpp_get_gpu_device_count() + return c_result + + +def get_cuda_latest_supported_driver_version(): + cdef int c_result + with nogil: + c_result = cpp_get_cuda_latest_supported_driver_version() + return c_result diff --git a/python/cudf/cudf/utils/gpu_utils.py b/python/cudf/cudf/utils/gpu_utils.py new file mode 100644 index 00000000000..ef4d668ba33 --- /dev/null +++ b/python/cudf/cudf/utils/gpu_utils.py @@ -0,0 +1,74 @@ +def validate_setup(): + from .gpu import get_gpu_device_count + + gpus_count = get_gpu_device_count() + + if gpus_count > 0: + # Cupy throws RunTimeException to get GPU count, + # hence obtaining GPU count by in-house cpp api above + import cupy + + # 75 - Indicates to get "cudaDevAttrComputeCapabilityMajor" attribute + # 0 - Get GPU 0 + major_version = cupy.cuda.runtime.deviceGetAttribute(75, 0) + + if major_version >= 6: + # You have a GPU with NVIDIA Pascal™ architecture or better + # Hardware Generation Compute Capability + # Turing 7.5 + # Volta 7.x + # Pascal 6.x + # Maxwell 5.x + # Kepler 3.x + # Fermi 2.x + pass + else: + from cudf.errors import UnSupportedGPUError + + raise UnSupportedGPUError( + "You will need a GPU with NVIDIA Pascal™ architecture or \ + better" + ) + + cuda_runtime_version = cupy.cuda.runtime.runtimeGetVersion() + + if cuda_runtime_version > 10000: + # CUDA Runtime Version Check: Runtime version is greater than 10000 + pass + else: + from cudf.errors import UnSupportedCUDAError + + raise UnSupportedCUDAError( + "Please update your CUDA Runtime to 10.0 or above" + ) + + 
cuda_driver_version = cupy.cuda.runtime.driverGetVersion() + + if cuda_driver_version == 0: + from cudf.errors import UnSupportedCUDAError + + raise UnSupportedCUDAError("Please install CUDA Driver") + elif cuda_driver_version >= cuda_runtime_version: + # CUDA Driver Version Check: + # Driver Runtime version is >= Runtime version + pass + else: + from cudf.errors import UnSupportedCUDAError + + raise UnSupportedCUDAError( + "Please update your NVIDIA GPU Driver to support CUDA \ + Runtime.\n" + "Detected CUDA Runtime version : " + + str(cuda_runtime_version) + + "\n" + "Latest version of CUDA \ + supported by current NVIDIA GPU Driver : " + + str(cuda_driver_version) + ) + + else: + import warnings + + warnings.warn( + "You donot have an NVIDIA GPU, please install one and try again" + ) From bc55a750722b572530ca426d2e2f5fe97c7e865c Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Thu, 26 Mar 2020 15:50:21 -0500 Subject: [PATCH 07/21] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f40d9fc552d..308d30cbdd0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ - PR #4548 Remove string_view is_null method - PR #4645 Add Alias for `kurtosis` as `kurt` - PR #4616 Enable different RMM allocation modes in unit tests +- PR #4692 Add GPU and CUDA validations ## Bug Fixes From 8d1482b4661a9e6697ce8f4bde4bd9b135574a81 Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Thu, 26 Mar 2020 16:09:30 -0500 Subject: [PATCH 08/21] Update python/cudf/cudf/utils/gpu_utils.py Co-Authored-By: Keith Kraus --- python/cudf/cudf/utils/gpu_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/cudf/cudf/utils/gpu_utils.py b/python/cudf/cudf/utils/gpu_utils.py index ef4d668ba33..4f1f565ed04 100644 --- a/python/cudf/cudf/utils/gpu_utils.py +++ b/python/cudf/cudf/utils/gpu_utils.py @@ -18,7 +18,7 @@ def validate_setup(): # Turing 7.5 # Volta 7.x # Pascal 6.x - # Maxwell 5.x + # 
Maxwell 5.x # Kepler 3.x # Fermi 2.x pass From 2e05d96a0318b16d97d232729836ddece89bbb69 Mon Sep 17 00:00:00 2001 From: galipremsagar Date: Thu, 26 Mar 2020 18:38:23 -0700 Subject: [PATCH 09/21] create a new module _cuda to keep all cuda related apis --- cpp/CMakeLists.txt | 1 - cpp/include/cudf/utilities/device.hpp | 60 - cpp/src/utilities/device.cu | 87 - python/cudf/cudf/_cuda/__init__.py | 0 python/cudf/cudf/_cuda/gpu.cpp | 5207 +++++++++++++++++++++++++ python/cudf/cudf/_cuda/gpu.pxd | 114 + python/cudf/cudf/_cuda/gpu.pyx | 272 ++ python/cudf/cudf/utils/gpu.pxd | 9 - python/cudf/cudf/utils/gpu.pyx | 29 - python/cudf/cudf/utils/gpu_utils.py | 50 +- 10 files changed, 5629 insertions(+), 200 deletions(-) delete mode 100644 cpp/include/cudf/utilities/device.hpp delete mode 100644 cpp/src/utilities/device.cu create mode 100644 python/cudf/cudf/_cuda/__init__.py create mode 100644 python/cudf/cudf/_cuda/gpu.cpp create mode 100644 python/cudf/cudf/_cuda/gpu.pxd create mode 100644 python/cudf/cudf/_cuda/gpu.pyx delete mode 100644 python/cudf/cudf/utils/gpu.pxd delete mode 100644 python/cudf/cudf/utils/gpu.pyx diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 033a6bfb42f..53ce358ccc1 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -546,7 +546,6 @@ add_library(cudf src/utilities/legacy/error_utils.cpp src/utilities/nvtx/nvtx_utils.cpp src/utilities/nvtx/legacy/nvtx_utils.cpp - src/utilities/device.cu src/copying/copy.cpp src/copying/scatter.cu src/copying/shift.cu diff --git a/cpp/include/cudf/utilities/device.hpp b/cpp/include/cudf/utilities/device.hpp deleted file mode 100644 index 7b99d1ff81a..00000000000 --- a/cpp/include/cudf/utilities/device.hpp +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (c) 2020, NVIDIA CORPORATION. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - - -namespace cudf { -namespace experimental { - -/** - * @brief Returns the version number of the current CUDA Runtime instance. - * The version is returned as (1000 major + 10 minor). For example, - * CUDA 9.2 would be represented by 9020. - * - * This function returns -1 if runtime version is NULL. - * - * @return Integer containing the version of current CUDA Runtime. - */ -int get_cuda_runtime_version(); - - -/** - * @brief Returns the number of devices with compute capability greater or - * equal to 2.0 that are available for execution. - * - * This function returns -1 if NULL device pointer is assigned. - * - * @return Integer containing the number of compute-capable devices. - */ -int get_gpu_device_count(); - - -/** - * @brief Returns in the latest version of CUDA supported by the driver. - * The version is returned as (1000 major + 10 minor). For example, - * CUDA 9.2 would be represented by 9020. If no driver is installed, - * then 0 is returned as the driver version. - * - * This function returns -1 if driver version is NULL. - * - * @return Integer containing the latest version of CUDA supported by the driver. - */ - -int get_cuda_latest_supported_driver_version(); - -} // namespace experimental -} // namespace cudf diff --git a/cpp/src/utilities/device.cu b/cpp/src/utilities/device.cu deleted file mode 100644 index 66dfe3ed996..00000000000 --- a/cpp/src/utilities/device.cu +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (c) 2020, NVIDIA CORPORATION. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include - -namespace cudf { -namespace experimental { - -/** - * @brief Returns the version number of the current CUDA Runtime instance. - * The version is returned as (1000 major + 10 minor). For example, - * CUDA 9.2 would be represented by 9020. - * - * This function returns -1 if runtime version is NULL. - * - * @return Integer containing the version of current CUDA Runtime. - */ -int get_cuda_runtime_version() { - int runtimeVersion; - cudaError_t status; - status = cudaRuntimeGetVersion(&runtimeVersion); - if (status != cudaSuccess) { - // If there is no GPU / any issues with the run time - // like driver initialization or Insufficient driver. - return -1; - } - return runtimeVersion; -} - -/** - * @brief Returns the number of devices with compute capability greater or - * equal to 2.0 that are available for execution. - * - * This function returns -1 if NULL device pointer is assigned. - * - * @return Integer containing the number of compute-capable devices. - */ -int get_gpu_device_count() { - int deviceCount; - cudaError_t status; - status = cudaGetDeviceCount(&deviceCount); - if (status != cudaSuccess) { - // If there is no GPU / any issues with the run time - // like driver initialization or Insufficient driver. - return -1; - } - return deviceCount; -} - -/** - * @brief Returns in the latest version of CUDA supported by the driver. 
- * The version is returned as (1000 major + 10 minor). For example, - * CUDA 9.2 would be represented by 9020. If no driver is installed, - * then 0 is returned as the driver version. - * - * This function returns -1 if driver version is NULL. - * - * @return Integer containing the latest version of CUDA supported by the driver. - */ -int get_cuda_latest_supported_driver_version() { - int driverVersion; - cudaError_t status; - status = cudaDriverGetVersion(&driverVersion); - if (status != cudaSuccess) { - // If there is no GPU / any issues with the run time - // like driver initialization or Insufficient driver. - return -1; - } - return driverVersion; -} - -} // namespace experimental -} // namespace cudf diff --git a/python/cudf/cudf/_cuda/__init__.py b/python/cudf/cudf/_cuda/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/python/cudf/cudf/_cuda/gpu.cpp b/python/cudf/cudf/_cuda/gpu.cpp new file mode 100644 index 00000000000..cca7eaedef8 --- /dev/null +++ b/python/cudf/cudf/_cuda/gpu.cpp @@ -0,0 +1,5207 @@ +/* Generated by Cython 0.29.15 */ + +/* BEGIN: Cython Metadata +{ + "distutils": { + "depends": [ + "/usr/local/cuda/include/cuda.h", + "/usr/local/cuda/include/cuda_runtime_api.h" + ], + "extra_compile_args": [ + "-std=c++14" + ], + "include_dirs": [ + "../../cpp/include/cudf", + "../../cpp/include", + "../../cpp/build/include", + "../../thirdparty/cub", + "../../thirdparty/libcudacxx/include", + "/conda/envs/cudf/include", + "/conda/envs/cudf/lib/python3.7/site-packages/numpy/core/include", + "/usr/local/cuda/include" + ], + "language": "c++", + "libraries": [ + "cudf" + ], + "library_dirs": [ + "/conda/envs/cudf/lib/python3.7/site-packages", + "/conda/envs/cudf/lib" + ], + "name": "cudf._cuda.gpu", + "sources": [ + "cudf/_cuda/gpu.pyx" + ] + }, + "module_name": "cudf._cuda.gpu" +} +END: Cython Metadata */ + +#define PY_SSIZE_T_CLEAN +#include "Python.h" +#ifndef Py_PYTHON_H + #error Python headers needed to compile C extensions, 
please install development version of Python. +#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) + #error Cython requires Python 2.6+ or Python 3.3+. +#else +#define CYTHON_ABI "0_29_15" +#define CYTHON_HEX_VERSION 0x001D0FF0 +#define CYTHON_FUTURE_DIVISION 1 +#include +#ifndef offsetof + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) +#endif +#if !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif + #ifndef __fastcall + #define __fastcall + #endif +#endif +#ifndef DL_IMPORT + #define DL_IMPORT(t) t +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#define __PYX_COMMA , +#ifndef HAVE_LONG_LONG + #if PY_VERSION_HEX >= 0x02070000 + #define HAVE_LONG_LONG + #endif +#endif +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef Py_HUGE_VAL + #define Py_HUGE_VAL HUGE_VAL +#endif +#ifdef PYPY_VERSION + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define 
CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#elif defined(PYSTON_VERSION) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#else + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define 
CYTHON_USE_PYTYPE_LOOKUP 0 + #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) + #define CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #elif !defined(CYTHON_USE_PYLONG_INTERNALS) + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) + #endif + #ifndef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) + #endif + #ifndef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #include "longintrepr.h" + #undef SHIFT + #undef BASE + #undef MASK + #ifdef SIZEOF_VOID_P + enum { 
__pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; + #endif +#endif +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#ifndef __has_cpp_attribute + #define __has_cpp_attribute(x) 0 +#endif +#ifndef CYTHON_RESTRICT + #if defined(__GNUC__) + #define CYTHON_RESTRICT __restrict__ + #elif defined(_MSC_VER) && _MSC_VER >= 1400 + #define CYTHON_RESTRICT __restrict + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_RESTRICT restrict + #else + #define CYTHON_RESTRICT + #endif +#endif +#ifndef CYTHON_UNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_MAYBE_UNUSED_VAR +# if defined(__cplusplus) + template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } +# else +# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) +# endif +#endif +#ifndef CYTHON_NCP_UNUSED +# if CYTHON_COMPILING_IN_CPYTHON +# define CYTHON_NCP_UNUSED +# else +# define CYTHON_NCP_UNUSED CYTHON_UNUSED +# endif +#endif +#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) +#ifdef _MSC_VER + #ifndef _MSC_STDINT_H_ + #if _MSC_VER < 1300 + typedef unsigned char uint8_t; + typedef unsigned int uint32_t; + #else + typedef unsigned __int8 uint8_t; + typedef unsigned __int32 uint32_t; + #endif + #endif +#else + #include +#endif +#ifndef CYTHON_FALLTHROUGH + #if defined(__cplusplus) && __cplusplus >= 201103L + #if __has_cpp_attribute(fallthrough) + #define CYTHON_FALLTHROUGH [[fallthrough]] + #elif __has_cpp_attribute(clang::fallthrough) + #define CYTHON_FALLTHROUGH [[clang::fallthrough]] + #elif __has_cpp_attribute(gnu::fallthrough) + #define CYTHON_FALLTHROUGH 
[[gnu::fallthrough]] + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_attribute(fallthrough) + #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) + #else + #define CYTHON_FALLTHROUGH + #endif + #endif + #if defined(__clang__ ) && defined(__apple_build_version__) + #if __apple_build_version__ < 7000000 + #undef CYTHON_FALLTHROUGH + #define CYTHON_FALLTHROUGH + #endif + #endif +#endif + +#ifndef __cplusplus + #error "Cython files generated with the C++ option must be compiled with a C++ compiler." +#endif +#ifndef CYTHON_INLINE + #if defined(__clang__) + #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) + #else + #define CYTHON_INLINE inline + #endif +#endif +template +void __Pyx_call_destructor(T& x) { + x.~T(); +} +template +class __Pyx_FakeReference { + public: + __Pyx_FakeReference() : ptr(NULL) { } + __Pyx_FakeReference(const T& ref) : ptr(const_cast(&ref)) { } + T *operator->() { return ptr; } + T *operator&() { return ptr; } + operator T&() { return *ptr; } + template bool operator ==(U other) { return *ptr == other; } + template bool operator !=(U other) { return *ptr != other; } + private: + T *ptr; +}; + +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define Py_OptimizeFlag 0 +#endif +#define __PYX_BUILD_PY_SSIZE_T "n" +#define CYTHON_FORMAT_SSIZE_T "z" +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyClass_Type +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" +#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#else + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, 
cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#endif + #define __Pyx_DefaultClassType PyType_Type +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#ifndef METH_STACKLESS + #define METH_STACKLESS 0 +#endif +#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif +#if CYTHON_FAST_PYCCALL +#define __Pyx_PyFastCFunction_Check(func)\ + ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) +#else +#define __Pyx_PyFastCFunction_Check(func) 0 +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 + #define PyMem_RawMalloc(n) PyMem_Malloc(n) + #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) + #define PyMem_RawFree(p) PyMem_Free(p) +#endif +#if CYTHON_COMPILING_IN_PYSTON + #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + 
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) +#include "pythread.h" +#define Py_tss_NEEDS_INIT 0 +typedef int Py_tss_t; +static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { + *key = PyThread_create_key(); + return 0; +} +static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { + Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); + *key = Py_tss_NEEDS_INIT; + return key; +} +static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { + PyObject_Free(key); +} +static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { + return *key != Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { + PyThread_delete_key(*key); + *key = Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { + return PyThread_set_key_value(*key, value); +} +static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { + return PyThread_get_key_value(*key); +} +#endif +#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS +#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) +#else +#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) +#endif +#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) + #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact + #define PyObject_Unicode PyObject_Str +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) +#endif +#if CYTHON_ASSUME_SAFE_MACROS + #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) +#else + #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject 
+#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t PyInt_AsLong +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func)) +#else + #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) +#endif +#if CYTHON_USE_ASYNC_SLOTS + #if PY_VERSION_HEX >= 0x030500B1 + #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods + #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) + #else + #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) + #endif +#else + #define __Pyx_PyType_AsAsync(obj) NULL +#endif +#ifndef __Pyx_PyAsyncMethodsStruct + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + } __Pyx_PyAsyncMethodsStruct; +#endif + +#if defined(WIN32) || defined(MS_WINDOWS) + #define _USE_MATH_DEFINES +#endif +#include +#ifdef NAN +#define __PYX_NAN() ((float) NAN) +#else +static CYTHON_INLINE float __PYX_NAN() { + float value; + memset(&value, 0xFF, sizeof(value)); + return value; +} +#endif +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + + +#define __PYX_ERR(f_index, lineno, Ln_error) \ +{ \ + __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ +} + +#ifndef __PYX_EXTERN_C + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#define __PYX_HAVE__cudf___cuda__gpu +#define __PYX_HAVE_API__cudf___cuda__gpu +/* Early includes */ +#include 
"cuda.h" +#include "cuda_runtime_api.h" +#ifdef _OPENMP +#include +#endif /* _OPENMP */ + +#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) +#define CYTHON_WITHOUT_ASSERTIONS +#endif + +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; + const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; + +#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) +#define __PYX_DEFAULT_STRING_ENCODING "" +#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString +#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) +#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ + (sizeof(type) < sizeof(Py_ssize_t)) ||\ + (sizeof(type) > sizeof(Py_ssize_t) &&\ + likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX) &&\ + (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ + v == (type)PY_SSIZE_T_MIN))) ||\ + (sizeof(type) == sizeof(Py_ssize_t) &&\ + (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX))) ) +static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { + return (size_t) i < (size_t) limit; +} +#if defined (__cplusplus) && __cplusplus >= 201103L + #include + #define __Pyx_sst_abs(value) std::abs(value) +#elif SIZEOF_INT >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) abs(value) +#elif SIZEOF_LONG >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) labs(value) +#elif defined (_MSC_VER) + #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define __Pyx_sst_abs(value) llabs(value) +#elif defined (__GNUC__) + #define __Pyx_sst_abs(value) __builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) 
((value<0) ? -value : value) +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define 
__Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Float(x)) +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +static char* 
__PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ +static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } + +static PyObject *__pyx_m = NULL; +static PyObject *__pyx_d; +static PyObject *__pyx_b; +static PyObject *__pyx_cython_runtime = NULL; +static PyObject *__pyx_empty_tuple; +static PyObject *__pyx_empty_bytes; +static PyObject *__pyx_empty_unicode; +static int __pyx_lineno; +static int __pyx_clineno = 0; +static const char * __pyx_cfilenm= __FILE__; +static const char *__pyx_filename; + + +static const char *__pyx_f[] = { + "cudf/_cuda/gpu.pyx", +}; + +/* "cudf/_cuda/gpu.pxd":113 + * int cudaDeviceGetAttribute(int* value, cudaDeviceAttr attr, int device) + * + * ctypedef int underlying_type_attribute # <<<<<<<<<<<<<< + */ +typedef int __pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute; + +/*--- Type declarations ---*/ + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef 
CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, PyObject*, int); + void (*DECREF)(void*, PyObject*, int); + void (*GOTREF)(void*, PyObject*, int); + void (*GIVEREF)(void*, PyObject*, int); + void* (*SetupContext)(const char*, int, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; +#ifdef WITH_THREAD + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + } +#else + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) +#endif + #define __Pyx_RefNannyFinishContext()\ + __Pyx_RefNanny->FinishContext(&__pyx_refnanny) + #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) + #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define 
__Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ + const char* function_name); + +/* PyObjectGetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* Import.proto */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); + +/* ImportFrom.proto */ +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); + +/* GetBuiltinName.proto */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* PyDictVersioning.proto */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) +#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) 
+#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ + (version_var) = __PYX_GET_DICT_VERSION(dict);\ + (cache_var) = (value); +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ + (VAR) = __pyx_dict_cached_value;\ + } else {\ + (VAR) = __pyx_dict_cached_value = (LOOKUP);\ + __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ + }\ +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); +#else +#define __PYX_GET_DICT_VERSION(dict) (0) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); +#endif + +/* GetModuleGlobalName.proto */ +#if CYTHON_USE_DICT_VERSIONS +#define __Pyx_GetModuleGlobalName(var, name) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ + (likely(__pyx_dict_cached_value) ? 
__Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ + __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} +#define __Pyx_GetModuleGlobalNameUncached(var, name) {\ + PY_UINT64_T __pyx_dict_version;\ + PyObject *__pyx_dict_cached_value;\ + (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); +#else +#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) +#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); +#endif + +/* CalculateMetaclass.proto */ +static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases); + +/* SetNameInClass.proto */ +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 +#define __Pyx_SetNameInClass(ns, name, value)\ + (likely(PyDict_CheckExact(ns)) ? _PyDict_SetItem_KnownHash(ns, name, value, ((PyASCIIObject *) name)->hash) : PyObject_SetItem(ns, name, value)) +#elif CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_SetNameInClass(ns, name, value)\ + (likely(PyDict_CheckExact(ns)) ? 
PyDict_SetItem(ns, name, value) : PyObject_SetItem(ns, name, value)) +#else +#define __Pyx_SetNameInClass(ns, name, value) PyObject_SetItem(ns, name, value) +#endif + +/* Py3ClassCreate.proto */ +static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *qualname, + PyObject *mkw, PyObject *modname, PyObject *doc); +static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, PyObject *dict, + PyObject *mkw, int calculate_metaclass, int allow_py2_metaclass); + +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define 
__Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif + +/* CodeObjectCache.proto */ +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE enum cudaDeviceAttr __Pyx_PyInt_As_enum__cudaDeviceAttr(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define 
__Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) + +/* CheckBinaryVersion.proto */ +static int __Pyx_check_binary_version(void); + +/* InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + + +/* Module declarations from 'cudf._cuda.gpu' */ +static int __pyx_f_4cudf_5_cuda_3gpu_driverGetVersion(int __pyx_skip_dispatch); /*proto*/ +static int __pyx_f_4cudf_5_cuda_3gpu_runtimeGetVersion(int __pyx_skip_dispatch); /*proto*/ +static int __pyx_f_4cudf_5_cuda_3gpu_getDeviceCount(int __pyx_skip_dispatch); /*proto*/ +static int __pyx_f_4cudf_5_cuda_3gpu_getDeviceAttribute(PyObject *, PyObject *, int __pyx_skip_dispatch); /*proto*/ +#define __Pyx_MODULE_NAME "cudf._cuda.gpu" +extern int __pyx_module_is_main_cudf___cuda__gpu; +int __pyx_module_is_main_cudf___cuda__gpu = 0; + +/* Implementation of 'cudf._cuda.gpu' */ +static const char __pyx_k_doc[] = "__doc__"; +static const char __pyx_k_attr[] = "attr"; +static const char __pyx_k_enum[] = "enum"; +static const char __pyx_k_main[] = "__main__"; +static const char __pyx_k_name[] = "__name__"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_device[] = "device"; +static const char __pyx_k_import[] = "__import__"; +static const char __pyx_k_module[] = "__module__"; 
+static const char __pyx_k_IntEnum[] = "IntEnum"; +static const char __pyx_k_prepare[] = "__prepare__"; +static const char __pyx_k_qualname[] = "__qualname__"; +static const char __pyx_k_metaclass[] = "__metaclass__"; +static const char __pyx_k_CudaDeviceAttr[] = "CudaDeviceAttr"; +static const char __pyx_k_cudf__cuda_gpu[] = "cudf._cuda.gpu"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_cudaDevAttrMaxPitch[] = "cudaDevAttrMaxPitch"; +static const char __pyx_k_cudaDevAttrPciBusId[] = "cudaDevAttrPciBusId"; +static const char __pyx_k_cudaDevAttrWarpSize[] = "cudaDevAttrWarpSize"; +static const char __pyx_k_cudaDevAttrClockRate[] = "cudaDevAttrClockRate"; +static const char __pyx_k_cudaDevAttrTccDriver[] = "cudaDevAttrTccDriver"; +static const char __pyx_k_cudaDevAttrEccEnabled[] = "cudaDevAttrEccEnabled"; +static const char __pyx_k_cudaDevAttrGpuOverlap[] = "cudaDevAttrGpuOverlap"; +static const char __pyx_k_cudaDevAttrIntegrated[] = "cudaDevAttrIntegrated"; +static const char __pyx_k_cudaDevAttrReserved92[] = "cudaDevAttrReserved92"; +static const char __pyx_k_cudaDevAttrReserved93[] = "cudaDevAttrReserved93"; +static const char __pyx_k_cudaDevAttrReserved94[] = "cudaDevAttrReserved94"; +static const char __pyx_k_cudaDevAttrComputeMode[] = "cudaDevAttrComputeMode"; +static const char __pyx_k_cudaDevAttrL2CacheSize[] = "cudaDevAttrL2CacheSize"; +static const char __pyx_k_cudaDevAttrMaxGridDimX[] = "cudaDevAttrMaxGridDimX"; +static const char __pyx_k_cudaDevAttrMaxGridDimY[] = "cudaDevAttrMaxGridDimY"; +static const char __pyx_k_cudaDevAttrMaxGridDimZ[] = "cudaDevAttrMaxGridDimZ"; +static const char __pyx_k_cudaDevAttrPciDeviceId[] = "cudaDevAttrPciDeviceId"; +static const char __pyx_k_cudaDevAttrPciDomainId[] = "cudaDevAttrPciDomainId"; +static const char __pyx_k_cudaDevAttrMaxBlockDimX[] = "cudaDevAttrMaxBlockDimX"; +static const char __pyx_k_cudaDevAttrMaxBlockDimY[] = "cudaDevAttrMaxBlockDimY"; +static const 
char __pyx_k_cudaDevAttrMaxBlockDimZ[] = "cudaDevAttrMaxBlockDimZ"; +static const char __pyx_k_cudaDevAttrManagedMemory[] = "cudaDevAttrManagedMemory"; +static const char __pyx_k_cudaDevAttrIsMultiGpuBoard[] = "cudaDevAttrIsMultiGpuBoard"; +static const char __pyx_k_cudaDevAttrMemoryClockRate[] = "cudaDevAttrMemoryClockRate"; +static const char __pyx_k_cudaDevAttrAsyncEngineCount[] = "cudaDevAttrAsyncEngineCount"; +static const char __pyx_k_cudaDevAttrCanMapHostMemory[] = "cudaDevAttrCanMapHostMemory"; +static const char __pyx_k_cudaDevAttrSurfaceAlignment[] = "cudaDevAttrSurfaceAlignment"; +static const char __pyx_k_cudaDevAttrTextureAlignment[] = "cudaDevAttrTextureAlignment"; +static const char __pyx_k_cudaDevAttrConcurrentKernels[] = "cudaDevAttrConcurrentKernels"; +static const char __pyx_k_cudaDevAttrCooperativeLaunch[] = "cudaDevAttrCooperativeLaunch"; +static const char __pyx_k_cudaDevAttrKernelExecTimeout[] = "cudaDevAttrKernelExecTimeout"; +static const char __pyx_k_cudaDevAttrMaxSurface1DWidth[] = "cudaDevAttrMaxSurface1DWidth"; +static const char __pyx_k_cudaDevAttrMaxSurface2DWidth[] = "cudaDevAttrMaxSurface2DWidth"; +static const char __pyx_k_cudaDevAttrMaxSurface3DDepth[] = "cudaDevAttrMaxSurface3DDepth"; +static const char __pyx_k_cudaDevAttrMaxSurface3DWidth[] = "cudaDevAttrMaxSurface3DWidth"; +static const char __pyx_k_cudaDevAttrMaxTexture1DWidth[] = "cudaDevAttrMaxTexture1DWidth"; +static const char __pyx_k_cudaDevAttrMaxTexture2DWidth[] = "cudaDevAttrMaxTexture2DWidth"; +static const char __pyx_k_cudaDevAttrMaxTexture3DDepth[] = "cudaDevAttrMaxTexture3DDepth"; +static const char __pyx_k_cudaDevAttrMaxTexture3DWidth[] = "cudaDevAttrMaxTexture3DWidth"; +static const char __pyx_k_cudaDevAttrUnifiedAddressing[] = "cudaDevAttrUnifiedAddressing"; +static const char __pyx_k_cudaDevAttrMaxSurface2DHeight[] = "cudaDevAttrMaxSurface2DHeight"; +static const char __pyx_k_cudaDevAttrMaxSurface3DHeight[] = "cudaDevAttrMaxSurface3DHeight"; +static const char 
__pyx_k_cudaDevAttrMaxTexture2DHeight[] = "cudaDevAttrMaxTexture2DHeight"; +static const char __pyx_k_cudaDevAttrMaxTexture3DHeight[] = "cudaDevAttrMaxTexture3DHeight"; +static const char __pyx_k_cudaDevAttrMaxThreadsPerBlock[] = "cudaDevAttrMaxThreadsPerBlock"; +static const char __pyx_k_cudaDevAttrMultiProcessorCount[] = "cudaDevAttrMultiProcessorCount"; +static const char __pyx_k_cudaDevAttrTotalConstantMemory[] = "cudaDevAttrTotalConstantMemory"; +static const char __pyx_k_cudaDevAttrCanFlushRemoteWrites[] = "cudaDevAttrCanFlushRemoteWrites"; +static const char __pyx_k_cudaDevAttrGlobalMemoryBusWidth[] = "cudaDevAttrGlobalMemoryBusWidth"; +static const char __pyx_k_cudaDevAttrMaxRegistersPerBlock[] = "cudaDevAttrMaxRegistersPerBlock"; +static const char __pyx_k_cudaDevAttrMaxTexture3DDepthAlt[] = "cudaDevAttrMaxTexture3DDepthAlt"; +static const char __pyx_k_cudaDevAttrMaxTexture3DWidthAlt[] = "cudaDevAttrMaxTexture3DWidthAlt"; +static const char __pyx_k_cudaDevAttrMultiGpuBoardGroupID[] = "cudaDevAttrMultiGpuBoardGroupID"; +static const char __pyx_k_cudaDevAttrPageableMemoryAccess[] = "cudaDevAttrPageableMemoryAccess"; +static const char __pyx_k_cudaDevAttrCanUseHostPointerForR[] = "cudaDevAttrCanUseHostPointerForRegisteredMem"; +static const char __pyx_k_cudaDevAttrComputeCapabilityMajo[] = "cudaDevAttrComputeCapabilityMajor"; +static const char __pyx_k_cudaDevAttrComputeCapabilityMino[] = "cudaDevAttrComputeCapabilityMinor"; +static const char __pyx_k_cudaDevAttrComputePreemptionSupp[] = "cudaDevAttrComputePreemptionSupported"; +static const char __pyx_k_cudaDevAttrConcurrentManagedAcce[] = "cudaDevAttrConcurrentManagedAccess"; +static const char __pyx_k_cudaDevAttrCooperativeMultiDevic[] = "cudaDevAttrCooperativeMultiDeviceLaunch"; +static const char __pyx_k_cudaDevAttrDirectManagedMemAcces[] = "cudaDevAttrDirectManagedMemAccessFromHost"; +static const char __pyx_k_cudaDevAttrGlobalL1CacheSupporte[] = "cudaDevAttrGlobalL1CacheSupported"; +static const char 
__pyx_k_cudaDevAttrHostNativeAtomicSuppo[] = "cudaDevAttrHostNativeAtomicSupported"; +static const char __pyx_k_cudaDevAttrHostRegisterSupported[] = "cudaDevAttrHostRegisterSupported"; +static const char __pyx_k_cudaDevAttrLocalL1CacheSupported[] = "cudaDevAttrLocalL1CacheSupported"; +static const char __pyx_k_cudaDevAttrMaxRegistersPerMultip[] = "cudaDevAttrMaxRegistersPerMultiprocessor"; +static const char __pyx_k_cudaDevAttrMaxSharedMemoryPerBlo[] = "cudaDevAttrMaxSharedMemoryPerBlock"; +static const char __pyx_k_cudaDevAttrMaxSharedMemoryPerMul[] = "cudaDevAttrMaxSharedMemoryPerMultiprocessor"; +static const char __pyx_k_cudaDevAttrMaxSurface1DLayeredLa[] = "cudaDevAttrMaxSurface1DLayeredLayers"; +static const char __pyx_k_cudaDevAttrMaxSurface1DLayeredWi[] = "cudaDevAttrMaxSurface1DLayeredWidth"; +static const char __pyx_k_cudaDevAttrMaxSurface2DLayeredHe[] = "cudaDevAttrMaxSurface2DLayeredHeight"; +static const char __pyx_k_cudaDevAttrMaxSurface2DLayeredLa[] = "cudaDevAttrMaxSurface2DLayeredLayers"; +static const char __pyx_k_cudaDevAttrMaxSurface2DLayeredWi[] = "cudaDevAttrMaxSurface2DLayeredWidth"; +static const char __pyx_k_cudaDevAttrMaxSurfaceCubemapLaye[] = "cudaDevAttrMaxSurfaceCubemapLayeredWidth"; +static const char __pyx_k_cudaDevAttrMaxSurfaceCubemapWidt[] = "cudaDevAttrMaxSurfaceCubemapWidth"; +static const char __pyx_k_cudaDevAttrMaxTexture1DLayeredLa[] = "cudaDevAttrMaxTexture1DLayeredLayers"; +static const char __pyx_k_cudaDevAttrMaxTexture1DLayeredWi[] = "cudaDevAttrMaxTexture1DLayeredWidth"; +static const char __pyx_k_cudaDevAttrMaxTexture1DLinearWid[] = "cudaDevAttrMaxTexture1DLinearWidth"; +static const char __pyx_k_cudaDevAttrMaxTexture1DMipmapped[] = "cudaDevAttrMaxTexture1DMipmappedWidth"; +static const char __pyx_k_cudaDevAttrMaxTexture2DGatherHei[] = "cudaDevAttrMaxTexture2DGatherHeight"; +static const char __pyx_k_cudaDevAttrMaxTexture2DGatherWid[] = "cudaDevAttrMaxTexture2DGatherWidth"; +static const char 
__pyx_k_cudaDevAttrMaxTexture2DLayeredHe[] = "cudaDevAttrMaxTexture2DLayeredHeight"; +static const char __pyx_k_cudaDevAttrMaxTexture2DLayeredLa[] = "cudaDevAttrMaxTexture2DLayeredLayers"; +static const char __pyx_k_cudaDevAttrMaxTexture2DLayeredWi[] = "cudaDevAttrMaxTexture2DLayeredWidth"; +static const char __pyx_k_cudaDevAttrMaxTexture2DLinearHei[] = "cudaDevAttrMaxTexture2DLinearHeight"; +static const char __pyx_k_cudaDevAttrMaxTexture2DLinearPit[] = "cudaDevAttrMaxTexture2DLinearPitch"; +static const char __pyx_k_cudaDevAttrMaxTexture2DLinearWid[] = "cudaDevAttrMaxTexture2DLinearWidth"; +static const char __pyx_k_cudaDevAttrMaxTexture2DMipmapped[] = "cudaDevAttrMaxTexture2DMipmappedWidth"; +static const char __pyx_k_cudaDevAttrMaxTexture3DHeightAlt[] = "cudaDevAttrMaxTexture3DHeightAlt"; +static const char __pyx_k_cudaDevAttrMaxTextureCubemapLaye[] = "cudaDevAttrMaxTextureCubemapLayeredWidth"; +static const char __pyx_k_cudaDevAttrMaxTextureCubemapWidt[] = "cudaDevAttrMaxTextureCubemapWidth"; +static const char __pyx_k_cudaDevAttrMaxThreadsPerMultiPro[] = "cudaDevAttrMaxThreadsPerMultiProcessor"; +static const char __pyx_k_cudaDevAttrPageableMemoryAccessU[] = "cudaDevAttrPageableMemoryAccessUsesHostPageTables"; +static const char __pyx_k_cudaDevAttrSingleToDoublePrecisi[] = "cudaDevAttrSingleToDoublePrecisionPerfRatio"; +static const char __pyx_k_cudaDevAttrStreamPrioritiesSuppo[] = "cudaDevAttrStreamPrioritiesSupported"; +static const char __pyx_k_cudaDevAttrTexturePitchAlignment[] = "cudaDevAttrTexturePitchAlignment"; +static const char __pyx_k_cudaDevAttrMaxSharedMemoryPerBlo_2[] = "cudaDevAttrMaxSharedMemoryPerBlockOptin"; +static const char __pyx_k_cudaDevAttrMaxSurfaceCubemapLaye_2[] = "cudaDevAttrMaxSurfaceCubemapLayeredLayers"; +static const char __pyx_k_cudaDevAttrMaxTexture2DMipmapped_2[] = "cudaDevAttrMaxTexture2DMipmappedHeight"; +static const char __pyx_k_cudaDevAttrMaxTextureCubemapLaye_2[] = "cudaDevAttrMaxTextureCubemapLayeredLayers"; +static 
PyObject *__pyx_n_s_CudaDeviceAttr; +static PyObject *__pyx_n_s_IntEnum; +static PyObject *__pyx_n_s_attr; +static PyObject *__pyx_n_s_cline_in_traceback; +static PyObject *__pyx_n_s_cudaDevAttrAsyncEngineCount; +static PyObject *__pyx_n_s_cudaDevAttrCanFlushRemoteWrites; +static PyObject *__pyx_n_s_cudaDevAttrCanMapHostMemory; +static PyObject *__pyx_n_s_cudaDevAttrCanUseHostPointerForR; +static PyObject *__pyx_n_s_cudaDevAttrClockRate; +static PyObject *__pyx_n_s_cudaDevAttrComputeCapabilityMajo; +static PyObject *__pyx_n_s_cudaDevAttrComputeCapabilityMino; +static PyObject *__pyx_n_s_cudaDevAttrComputeMode; +static PyObject *__pyx_n_s_cudaDevAttrComputePreemptionSupp; +static PyObject *__pyx_n_s_cudaDevAttrConcurrentKernels; +static PyObject *__pyx_n_s_cudaDevAttrConcurrentManagedAcce; +static PyObject *__pyx_n_s_cudaDevAttrCooperativeLaunch; +static PyObject *__pyx_n_s_cudaDevAttrCooperativeMultiDevic; +static PyObject *__pyx_n_s_cudaDevAttrDirectManagedMemAcces; +static PyObject *__pyx_n_s_cudaDevAttrEccEnabled; +static PyObject *__pyx_n_s_cudaDevAttrGlobalL1CacheSupporte; +static PyObject *__pyx_n_s_cudaDevAttrGlobalMemoryBusWidth; +static PyObject *__pyx_n_s_cudaDevAttrGpuOverlap; +static PyObject *__pyx_n_s_cudaDevAttrHostNativeAtomicSuppo; +static PyObject *__pyx_n_s_cudaDevAttrHostRegisterSupported; +static PyObject *__pyx_n_s_cudaDevAttrIntegrated; +static PyObject *__pyx_n_s_cudaDevAttrIsMultiGpuBoard; +static PyObject *__pyx_n_s_cudaDevAttrKernelExecTimeout; +static PyObject *__pyx_n_s_cudaDevAttrL2CacheSize; +static PyObject *__pyx_n_s_cudaDevAttrLocalL1CacheSupported; +static PyObject *__pyx_n_s_cudaDevAttrManagedMemory; +static PyObject *__pyx_n_s_cudaDevAttrMaxBlockDimX; +static PyObject *__pyx_n_s_cudaDevAttrMaxBlockDimY; +static PyObject *__pyx_n_s_cudaDevAttrMaxBlockDimZ; +static PyObject *__pyx_n_s_cudaDevAttrMaxGridDimX; +static PyObject *__pyx_n_s_cudaDevAttrMaxGridDimY; +static PyObject *__pyx_n_s_cudaDevAttrMaxGridDimZ; +static PyObject 
*__pyx_n_s_cudaDevAttrMaxPitch; +static PyObject *__pyx_n_s_cudaDevAttrMaxRegistersPerBlock; +static PyObject *__pyx_n_s_cudaDevAttrMaxRegistersPerMultip; +static PyObject *__pyx_n_s_cudaDevAttrMaxSharedMemoryPerBlo; +static PyObject *__pyx_n_s_cudaDevAttrMaxSharedMemoryPerBlo_2; +static PyObject *__pyx_n_s_cudaDevAttrMaxSharedMemoryPerMul; +static PyObject *__pyx_n_s_cudaDevAttrMaxSurface1DLayeredLa; +static PyObject *__pyx_n_s_cudaDevAttrMaxSurface1DLayeredWi; +static PyObject *__pyx_n_s_cudaDevAttrMaxSurface1DWidth; +static PyObject *__pyx_n_s_cudaDevAttrMaxSurface2DHeight; +static PyObject *__pyx_n_s_cudaDevAttrMaxSurface2DLayeredHe; +static PyObject *__pyx_n_s_cudaDevAttrMaxSurface2DLayeredLa; +static PyObject *__pyx_n_s_cudaDevAttrMaxSurface2DLayeredWi; +static PyObject *__pyx_n_s_cudaDevAttrMaxSurface2DWidth; +static PyObject *__pyx_n_s_cudaDevAttrMaxSurface3DDepth; +static PyObject *__pyx_n_s_cudaDevAttrMaxSurface3DHeight; +static PyObject *__pyx_n_s_cudaDevAttrMaxSurface3DWidth; +static PyObject *__pyx_n_s_cudaDevAttrMaxSurfaceCubemapLaye; +static PyObject *__pyx_n_s_cudaDevAttrMaxSurfaceCubemapLaye_2; +static PyObject *__pyx_n_s_cudaDevAttrMaxSurfaceCubemapWidt; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture1DLayeredLa; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture1DLayeredWi; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture1DLinearWid; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture1DMipmapped; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture1DWidth; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DGatherHei; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DGatherWid; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DHeight; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DLayeredHe; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DLayeredLa; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DLayeredWi; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DLinearHei; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DLinearPit; +static 
PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DLinearWid; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DMipmapped; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DMipmapped_2; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DWidth; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture3DDepth; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture3DDepthAlt; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture3DHeight; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture3DHeightAlt; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture3DWidth; +static PyObject *__pyx_n_s_cudaDevAttrMaxTexture3DWidthAlt; +static PyObject *__pyx_n_s_cudaDevAttrMaxTextureCubemapLaye; +static PyObject *__pyx_n_s_cudaDevAttrMaxTextureCubemapLaye_2; +static PyObject *__pyx_n_s_cudaDevAttrMaxTextureCubemapWidt; +static PyObject *__pyx_n_s_cudaDevAttrMaxThreadsPerBlock; +static PyObject *__pyx_n_s_cudaDevAttrMaxThreadsPerMultiPro; +static PyObject *__pyx_n_s_cudaDevAttrMemoryClockRate; +static PyObject *__pyx_n_s_cudaDevAttrMultiGpuBoardGroupID; +static PyObject *__pyx_n_s_cudaDevAttrMultiProcessorCount; +static PyObject *__pyx_n_s_cudaDevAttrPageableMemoryAccess; +static PyObject *__pyx_n_s_cudaDevAttrPageableMemoryAccessU; +static PyObject *__pyx_n_s_cudaDevAttrPciBusId; +static PyObject *__pyx_n_s_cudaDevAttrPciDeviceId; +static PyObject *__pyx_n_s_cudaDevAttrPciDomainId; +static PyObject *__pyx_n_s_cudaDevAttrReserved92; +static PyObject *__pyx_n_s_cudaDevAttrReserved93; +static PyObject *__pyx_n_s_cudaDevAttrReserved94; +static PyObject *__pyx_n_s_cudaDevAttrSingleToDoublePrecisi; +static PyObject *__pyx_n_s_cudaDevAttrStreamPrioritiesSuppo; +static PyObject *__pyx_n_s_cudaDevAttrSurfaceAlignment; +static PyObject *__pyx_n_s_cudaDevAttrTccDriver; +static PyObject *__pyx_n_s_cudaDevAttrTextureAlignment; +static PyObject *__pyx_n_s_cudaDevAttrTexturePitchAlignment; +static PyObject *__pyx_n_s_cudaDevAttrTotalConstantMemory; +static PyObject *__pyx_n_s_cudaDevAttrUnifiedAddressing; +static PyObject 
*__pyx_n_s_cudaDevAttrWarpSize; +static PyObject *__pyx_n_s_cudf__cuda_gpu; +static PyObject *__pyx_n_s_device; +static PyObject *__pyx_n_s_doc; +static PyObject *__pyx_n_s_enum; +static PyObject *__pyx_n_s_import; +static PyObject *__pyx_n_s_main; +static PyObject *__pyx_n_s_metaclass; +static PyObject *__pyx_n_s_module; +static PyObject *__pyx_n_s_name; +static PyObject *__pyx_n_s_prepare; +static PyObject *__pyx_n_s_qualname; +static PyObject *__pyx_n_s_test; +static PyObject *__pyx_pf_4cudf_5_cuda_3gpu_driverGetVersion(CYTHON_UNUSED PyObject *__pyx_self); /* proto */ +static PyObject *__pyx_pf_4cudf_5_cuda_3gpu_2runtimeGetVersion(CYTHON_UNUSED PyObject *__pyx_self); /* proto */ +static PyObject *__pyx_pf_4cudf_5_cuda_3gpu_4getDeviceCount(CYTHON_UNUSED PyObject *__pyx_self); /* proto */ +static PyObject *__pyx_pf_4cudf_5_cuda_3gpu_6getDeviceAttribute(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_attr, PyObject *__pyx_v_device); /* proto */ +/* Late includes */ + +/* "cudf/_cuda/gpu.pyx":117 + * + * + * cpdef int driverGetVersion() except? -1: # <<<<<<<<<<<<<< + * cdef int version + * status = cudaDriverGetVersion(&version) + */ + +static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_1driverGetVersion(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static int __pyx_f_4cudf_5_cuda_3gpu_driverGetVersion(CYTHON_UNUSED int __pyx_skip_dispatch) { + int __pyx_v_version; + CYTHON_UNUSED int __pyx_v_status; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("driverGetVersion", 0); + + /* "cudf/_cuda/gpu.pyx":119 + * cpdef int driverGetVersion() except? 
-1: + * cdef int version + * status = cudaDriverGetVersion(&version) # <<<<<<<<<<<<<< + * return version + * + */ + __pyx_v_status = cudaDriverGetVersion((&__pyx_v_version)); + + /* "cudf/_cuda/gpu.pyx":120 + * cdef int version + * status = cudaDriverGetVersion(&version) + * return version # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_version; + goto __pyx_L0; + + /* "cudf/_cuda/gpu.pyx":117 + * + * + * cpdef int driverGetVersion() except? -1: # <<<<<<<<<<<<<< + * cdef int version + * status = cudaDriverGetVersion(&version) + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* Python wrapper */ +static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_1driverGetVersion(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_4cudf_5_cuda_3gpu_driverGetVersion[] = "driverGetVersion() -> int"; +static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_1driverGetVersion(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("driverGetVersion (wrapper)", 0); + __pyx_r = __pyx_pf_4cudf_5_cuda_3gpu_driverGetVersion(__pyx_self); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_4cudf_5_cuda_3gpu_driverGetVersion(CYTHON_UNUSED PyObject *__pyx_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + __Pyx_RefNannySetupContext("driverGetVersion", 0); + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __pyx_f_4cudf_5_cuda_3gpu_driverGetVersion(0); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 117, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 117, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + 
__Pyx_AddTraceback("cudf._cuda.gpu.driverGetVersion", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "cudf/_cuda/gpu.pyx":123 + * + * + * cpdef int runtimeGetVersion() except? -1: # <<<<<<<<<<<<<< + * cdef int version + * status = cudaRuntimeGetVersion(&version) + */ + +static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_3runtimeGetVersion(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static int __pyx_f_4cudf_5_cuda_3gpu_runtimeGetVersion(CYTHON_UNUSED int __pyx_skip_dispatch) { + int __pyx_v_version; + CYTHON_UNUSED int __pyx_v_status; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("runtimeGetVersion", 0); + + /* "cudf/_cuda/gpu.pyx":125 + * cpdef int runtimeGetVersion() except? -1: + * cdef int version + * status = cudaRuntimeGetVersion(&version) # <<<<<<<<<<<<<< + * return version + * + */ + __pyx_v_status = cudaRuntimeGetVersion((&__pyx_v_version)); + + /* "cudf/_cuda/gpu.pyx":126 + * cdef int version + * status = cudaRuntimeGetVersion(&version) + * return version # <<<<<<<<<<<<<< + * + * cpdef int getDeviceCount() except? -1: + */ + __pyx_r = __pyx_v_version; + goto __pyx_L0; + + /* "cudf/_cuda/gpu.pyx":123 + * + * + * cpdef int runtimeGetVersion() except? 
-1: # <<<<<<<<<<<<<< + * cdef int version + * status = cudaRuntimeGetVersion(&version) + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* Python wrapper */ +static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_3runtimeGetVersion(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_4cudf_5_cuda_3gpu_2runtimeGetVersion[] = "runtimeGetVersion() -> int"; +static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_3runtimeGetVersion(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("runtimeGetVersion (wrapper)", 0); + __pyx_r = __pyx_pf_4cudf_5_cuda_3gpu_2runtimeGetVersion(__pyx_self); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_4cudf_5_cuda_3gpu_2runtimeGetVersion(CYTHON_UNUSED PyObject *__pyx_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + __Pyx_RefNannySetupContext("runtimeGetVersion", 0); + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __pyx_f_4cudf_5_cuda_3gpu_runtimeGetVersion(0); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 123, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 123, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("cudf._cuda.gpu.runtimeGetVersion", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "cudf/_cuda/gpu.pyx":128 + * return version + * + * cpdef int getDeviceCount() except? 
-1: # <<<<<<<<<<<<<< + * cdef int count + * status = cudaGetDeviceCount(&count) + */ + +static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_5getDeviceCount(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static int __pyx_f_4cudf_5_cuda_3gpu_getDeviceCount(CYTHON_UNUSED int __pyx_skip_dispatch) { + int __pyx_v_count; + CYTHON_UNUSED int __pyx_v_status; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("getDeviceCount", 0); + + /* "cudf/_cuda/gpu.pyx":130 + * cpdef int getDeviceCount() except? -1: + * cdef int count + * status = cudaGetDeviceCount(&count) # <<<<<<<<<<<<<< + * return count + * + */ + __pyx_v_status = cudaGetDeviceCount((&__pyx_v_count)); + + /* "cudf/_cuda/gpu.pyx":131 + * cdef int count + * status = cudaGetDeviceCount(&count) + * return count # <<<<<<<<<<<<<< + * + * cpdef int getDeviceAttribute(attr, device) except? -1: + */ + __pyx_r = __pyx_v_count; + goto __pyx_L0; + + /* "cudf/_cuda/gpu.pyx":128 + * return version + * + * cpdef int getDeviceCount() except? 
-1: # <<<<<<<<<<<<<< + * cdef int count + * status = cudaGetDeviceCount(&count) + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* Python wrapper */ +static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_5getDeviceCount(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_4cudf_5_cuda_3gpu_4getDeviceCount[] = "getDeviceCount() -> int"; +static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_5getDeviceCount(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("getDeviceCount (wrapper)", 0); + __pyx_r = __pyx_pf_4cudf_5_cuda_3gpu_4getDeviceCount(__pyx_self); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_4cudf_5_cuda_3gpu_4getDeviceCount(CYTHON_UNUSED PyObject *__pyx_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + __Pyx_RefNannySetupContext("getDeviceCount", 0); + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __pyx_f_4cudf_5_cuda_3gpu_getDeviceCount(0); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 128, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 128, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("cudf._cuda.gpu.getDeviceCount", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "cudf/_cuda/gpu.pyx":133 + * return count + * + * cpdef int getDeviceAttribute(attr, device) except? 
-1: # <<<<<<<<<<<<<< + * cdef int value + * status = cudaDeviceGetAttribute(&value, attr, device) + */ + +static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_7getDeviceAttribute(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_f_4cudf_5_cuda_3gpu_getDeviceAttribute(PyObject *__pyx_v_attr, PyObject *__pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) { + int __pyx_v_value; + CYTHON_UNUSED int __pyx_v_status; + int __pyx_r; + __Pyx_RefNannyDeclarations + enum cudaDeviceAttr __pyx_t_1; + int __pyx_t_2; + __Pyx_RefNannySetupContext("getDeviceAttribute", 0); + + /* "cudf/_cuda/gpu.pyx":135 + * cpdef int getDeviceAttribute(attr, device) except? -1: + * cdef int value + * status = cudaDeviceGetAttribute(&value, attr, device) # <<<<<<<<<<<<<< + * return value + */ + __pyx_t_1 = ((enum cudaDeviceAttr)__Pyx_PyInt_As_enum__cudaDeviceAttr(__pyx_v_attr)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 135, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_v_device); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 135, __pyx_L1_error) + __pyx_v_status = cudaDeviceGetAttribute((&__pyx_v_value), __pyx_t_1, __pyx_t_2); + + /* "cudf/_cuda/gpu.pyx":136 + * cdef int value + * status = cudaDeviceGetAttribute(&value, attr, device) + * return value # <<<<<<<<<<<<<< + */ + __pyx_r = __pyx_v_value; + goto __pyx_L0; + + /* "cudf/_cuda/gpu.pyx":133 + * return count + * + * cpdef int getDeviceAttribute(attr, device) except? 
-1: # <<<<<<<<<<<<<< + * cdef int value + * status = cudaDeviceGetAttribute(&value, attr, device) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_AddTraceback("cudf._cuda.gpu.getDeviceAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* Python wrapper */ +static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_7getDeviceAttribute(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_4cudf_5_cuda_3gpu_6getDeviceAttribute[] = "getDeviceAttribute(attr, device) -> int"; +static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_7getDeviceAttribute(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_attr = 0; + PyObject *__pyx_v_device = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("getDeviceAttribute (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_attr,&__pyx_n_s_device,0}; + PyObject* values[2] = {0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_attr)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_device)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("getDeviceAttribute", 1, 2, 2, 1); __PYX_ERR(0, 133, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "getDeviceAttribute") < 0)) 
__PYX_ERR(0, 133, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + } + __pyx_v_attr = values[0]; + __pyx_v_device = values[1]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("getDeviceAttribute", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 133, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("cudf._cuda.gpu.getDeviceAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_4cudf_5_cuda_3gpu_6getDeviceAttribute(__pyx_self, __pyx_v_attr, __pyx_v_device); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_4cudf_5_cuda_3gpu_6getDeviceAttribute(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_attr, PyObject *__pyx_v_device) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + __Pyx_RefNannySetupContext("getDeviceAttribute", 0); + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __pyx_f_4cudf_5_cuda_3gpu_getDeviceAttribute(__pyx_v_attr, __pyx_v_device, 0); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 133, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 133, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("cudf._cuda.gpu.getDeviceAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyMethodDef __pyx_methods[] = { + {"driverGetVersion", 
(PyCFunction)__pyx_pw_4cudf_5_cuda_3gpu_1driverGetVersion, METH_NOARGS, __pyx_doc_4cudf_5_cuda_3gpu_driverGetVersion}, + {"runtimeGetVersion", (PyCFunction)__pyx_pw_4cudf_5_cuda_3gpu_3runtimeGetVersion, METH_NOARGS, __pyx_doc_4cudf_5_cuda_3gpu_2runtimeGetVersion}, + {"getDeviceCount", (PyCFunction)__pyx_pw_4cudf_5_cuda_3gpu_5getDeviceCount, METH_NOARGS, __pyx_doc_4cudf_5_cuda_3gpu_4getDeviceCount}, + {"getDeviceAttribute", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4cudf_5_cuda_3gpu_7getDeviceAttribute, METH_VARARGS|METH_KEYWORDS, __pyx_doc_4cudf_5_cuda_3gpu_6getDeviceAttribute}, + {0, 0, 0, 0} +}; + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec_gpu(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec_gpu}, + {0, NULL} +}; +#endif + +static struct PyModuleDef __pyx_moduledef = { + PyModuleDef_HEAD_INIT, + "gpu", + 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif +#ifndef CYTHON_SMALL_CODE +#if defined(__clang__) + #define CYTHON_SMALL_CODE +#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) + #define CYTHON_SMALL_CODE __attribute__((cold)) +#else + #define CYTHON_SMALL_CODE +#endif +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_n_s_CudaDeviceAttr, __pyx_k_CudaDeviceAttr, sizeof(__pyx_k_CudaDeviceAttr), 0, 0, 1, 1}, + {&__pyx_n_s_IntEnum, __pyx_k_IntEnum, sizeof(__pyx_k_IntEnum), 0, 0, 1, 1}, + {&__pyx_n_s_attr, __pyx_k_attr, sizeof(__pyx_k_attr), 0, 0, 1, 1}, + 
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrAsyncEngineCount, __pyx_k_cudaDevAttrAsyncEngineCount, sizeof(__pyx_k_cudaDevAttrAsyncEngineCount), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrCanFlushRemoteWrites, __pyx_k_cudaDevAttrCanFlushRemoteWrites, sizeof(__pyx_k_cudaDevAttrCanFlushRemoteWrites), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrCanMapHostMemory, __pyx_k_cudaDevAttrCanMapHostMemory, sizeof(__pyx_k_cudaDevAttrCanMapHostMemory), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrCanUseHostPointerForR, __pyx_k_cudaDevAttrCanUseHostPointerForR, sizeof(__pyx_k_cudaDevAttrCanUseHostPointerForR), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrClockRate, __pyx_k_cudaDevAttrClockRate, sizeof(__pyx_k_cudaDevAttrClockRate), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrComputeCapabilityMajo, __pyx_k_cudaDevAttrComputeCapabilityMajo, sizeof(__pyx_k_cudaDevAttrComputeCapabilityMajo), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrComputeCapabilityMino, __pyx_k_cudaDevAttrComputeCapabilityMino, sizeof(__pyx_k_cudaDevAttrComputeCapabilityMino), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrComputeMode, __pyx_k_cudaDevAttrComputeMode, sizeof(__pyx_k_cudaDevAttrComputeMode), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrComputePreemptionSupp, __pyx_k_cudaDevAttrComputePreemptionSupp, sizeof(__pyx_k_cudaDevAttrComputePreemptionSupp), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrConcurrentKernels, __pyx_k_cudaDevAttrConcurrentKernels, sizeof(__pyx_k_cudaDevAttrConcurrentKernels), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrConcurrentManagedAcce, __pyx_k_cudaDevAttrConcurrentManagedAcce, sizeof(__pyx_k_cudaDevAttrConcurrentManagedAcce), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrCooperativeLaunch, __pyx_k_cudaDevAttrCooperativeLaunch, sizeof(__pyx_k_cudaDevAttrCooperativeLaunch), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrCooperativeMultiDevic, __pyx_k_cudaDevAttrCooperativeMultiDevic, sizeof(__pyx_k_cudaDevAttrCooperativeMultiDevic), 0, 0, 1, 1}, + 
{&__pyx_n_s_cudaDevAttrDirectManagedMemAcces, __pyx_k_cudaDevAttrDirectManagedMemAcces, sizeof(__pyx_k_cudaDevAttrDirectManagedMemAcces), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrEccEnabled, __pyx_k_cudaDevAttrEccEnabled, sizeof(__pyx_k_cudaDevAttrEccEnabled), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrGlobalL1CacheSupporte, __pyx_k_cudaDevAttrGlobalL1CacheSupporte, sizeof(__pyx_k_cudaDevAttrGlobalL1CacheSupporte), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrGlobalMemoryBusWidth, __pyx_k_cudaDevAttrGlobalMemoryBusWidth, sizeof(__pyx_k_cudaDevAttrGlobalMemoryBusWidth), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrGpuOverlap, __pyx_k_cudaDevAttrGpuOverlap, sizeof(__pyx_k_cudaDevAttrGpuOverlap), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrHostNativeAtomicSuppo, __pyx_k_cudaDevAttrHostNativeAtomicSuppo, sizeof(__pyx_k_cudaDevAttrHostNativeAtomicSuppo), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrHostRegisterSupported, __pyx_k_cudaDevAttrHostRegisterSupported, sizeof(__pyx_k_cudaDevAttrHostRegisterSupported), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrIntegrated, __pyx_k_cudaDevAttrIntegrated, sizeof(__pyx_k_cudaDevAttrIntegrated), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrIsMultiGpuBoard, __pyx_k_cudaDevAttrIsMultiGpuBoard, sizeof(__pyx_k_cudaDevAttrIsMultiGpuBoard), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrKernelExecTimeout, __pyx_k_cudaDevAttrKernelExecTimeout, sizeof(__pyx_k_cudaDevAttrKernelExecTimeout), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrL2CacheSize, __pyx_k_cudaDevAttrL2CacheSize, sizeof(__pyx_k_cudaDevAttrL2CacheSize), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrLocalL1CacheSupported, __pyx_k_cudaDevAttrLocalL1CacheSupported, sizeof(__pyx_k_cudaDevAttrLocalL1CacheSupported), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrManagedMemory, __pyx_k_cudaDevAttrManagedMemory, sizeof(__pyx_k_cudaDevAttrManagedMemory), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxBlockDimX, __pyx_k_cudaDevAttrMaxBlockDimX, sizeof(__pyx_k_cudaDevAttrMaxBlockDimX), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxBlockDimY, __pyx_k_cudaDevAttrMaxBlockDimY, 
sizeof(__pyx_k_cudaDevAttrMaxBlockDimY), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxBlockDimZ, __pyx_k_cudaDevAttrMaxBlockDimZ, sizeof(__pyx_k_cudaDevAttrMaxBlockDimZ), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxGridDimX, __pyx_k_cudaDevAttrMaxGridDimX, sizeof(__pyx_k_cudaDevAttrMaxGridDimX), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxGridDimY, __pyx_k_cudaDevAttrMaxGridDimY, sizeof(__pyx_k_cudaDevAttrMaxGridDimY), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxGridDimZ, __pyx_k_cudaDevAttrMaxGridDimZ, sizeof(__pyx_k_cudaDevAttrMaxGridDimZ), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxPitch, __pyx_k_cudaDevAttrMaxPitch, sizeof(__pyx_k_cudaDevAttrMaxPitch), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxRegistersPerBlock, __pyx_k_cudaDevAttrMaxRegistersPerBlock, sizeof(__pyx_k_cudaDevAttrMaxRegistersPerBlock), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxRegistersPerMultip, __pyx_k_cudaDevAttrMaxRegistersPerMultip, sizeof(__pyx_k_cudaDevAttrMaxRegistersPerMultip), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxSharedMemoryPerBlo, __pyx_k_cudaDevAttrMaxSharedMemoryPerBlo, sizeof(__pyx_k_cudaDevAttrMaxSharedMemoryPerBlo), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxSharedMemoryPerBlo_2, __pyx_k_cudaDevAttrMaxSharedMemoryPerBlo_2, sizeof(__pyx_k_cudaDevAttrMaxSharedMemoryPerBlo_2), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxSharedMemoryPerMul, __pyx_k_cudaDevAttrMaxSharedMemoryPerMul, sizeof(__pyx_k_cudaDevAttrMaxSharedMemoryPerMul), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxSurface1DLayeredLa, __pyx_k_cudaDevAttrMaxSurface1DLayeredLa, sizeof(__pyx_k_cudaDevAttrMaxSurface1DLayeredLa), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxSurface1DLayeredWi, __pyx_k_cudaDevAttrMaxSurface1DLayeredWi, sizeof(__pyx_k_cudaDevAttrMaxSurface1DLayeredWi), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxSurface1DWidth, __pyx_k_cudaDevAttrMaxSurface1DWidth, sizeof(__pyx_k_cudaDevAttrMaxSurface1DWidth), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxSurface2DHeight, __pyx_k_cudaDevAttrMaxSurface2DHeight, 
sizeof(__pyx_k_cudaDevAttrMaxSurface2DHeight), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxSurface2DLayeredHe, __pyx_k_cudaDevAttrMaxSurface2DLayeredHe, sizeof(__pyx_k_cudaDevAttrMaxSurface2DLayeredHe), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxSurface2DLayeredLa, __pyx_k_cudaDevAttrMaxSurface2DLayeredLa, sizeof(__pyx_k_cudaDevAttrMaxSurface2DLayeredLa), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxSurface2DLayeredWi, __pyx_k_cudaDevAttrMaxSurface2DLayeredWi, sizeof(__pyx_k_cudaDevAttrMaxSurface2DLayeredWi), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxSurface2DWidth, __pyx_k_cudaDevAttrMaxSurface2DWidth, sizeof(__pyx_k_cudaDevAttrMaxSurface2DWidth), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxSurface3DDepth, __pyx_k_cudaDevAttrMaxSurface3DDepth, sizeof(__pyx_k_cudaDevAttrMaxSurface3DDepth), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxSurface3DHeight, __pyx_k_cudaDevAttrMaxSurface3DHeight, sizeof(__pyx_k_cudaDevAttrMaxSurface3DHeight), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxSurface3DWidth, __pyx_k_cudaDevAttrMaxSurface3DWidth, sizeof(__pyx_k_cudaDevAttrMaxSurface3DWidth), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxSurfaceCubemapLaye, __pyx_k_cudaDevAttrMaxSurfaceCubemapLaye, sizeof(__pyx_k_cudaDevAttrMaxSurfaceCubemapLaye), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxSurfaceCubemapLaye_2, __pyx_k_cudaDevAttrMaxSurfaceCubemapLaye_2, sizeof(__pyx_k_cudaDevAttrMaxSurfaceCubemapLaye_2), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxSurfaceCubemapWidt, __pyx_k_cudaDevAttrMaxSurfaceCubemapWidt, sizeof(__pyx_k_cudaDevAttrMaxSurfaceCubemapWidt), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture1DLayeredLa, __pyx_k_cudaDevAttrMaxTexture1DLayeredLa, sizeof(__pyx_k_cudaDevAttrMaxTexture1DLayeredLa), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture1DLayeredWi, __pyx_k_cudaDevAttrMaxTexture1DLayeredWi, sizeof(__pyx_k_cudaDevAttrMaxTexture1DLayeredWi), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture1DLinearWid, __pyx_k_cudaDevAttrMaxTexture1DLinearWid, sizeof(__pyx_k_cudaDevAttrMaxTexture1DLinearWid), 0, 
0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture1DMipmapped, __pyx_k_cudaDevAttrMaxTexture1DMipmapped, sizeof(__pyx_k_cudaDevAttrMaxTexture1DMipmapped), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture1DWidth, __pyx_k_cudaDevAttrMaxTexture1DWidth, sizeof(__pyx_k_cudaDevAttrMaxTexture1DWidth), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture2DGatherHei, __pyx_k_cudaDevAttrMaxTexture2DGatherHei, sizeof(__pyx_k_cudaDevAttrMaxTexture2DGatherHei), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture2DGatherWid, __pyx_k_cudaDevAttrMaxTexture2DGatherWid, sizeof(__pyx_k_cudaDevAttrMaxTexture2DGatherWid), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture2DHeight, __pyx_k_cudaDevAttrMaxTexture2DHeight, sizeof(__pyx_k_cudaDevAttrMaxTexture2DHeight), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture2DLayeredHe, __pyx_k_cudaDevAttrMaxTexture2DLayeredHe, sizeof(__pyx_k_cudaDevAttrMaxTexture2DLayeredHe), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture2DLayeredLa, __pyx_k_cudaDevAttrMaxTexture2DLayeredLa, sizeof(__pyx_k_cudaDevAttrMaxTexture2DLayeredLa), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture2DLayeredWi, __pyx_k_cudaDevAttrMaxTexture2DLayeredWi, sizeof(__pyx_k_cudaDevAttrMaxTexture2DLayeredWi), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture2DLinearHei, __pyx_k_cudaDevAttrMaxTexture2DLinearHei, sizeof(__pyx_k_cudaDevAttrMaxTexture2DLinearHei), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture2DLinearPit, __pyx_k_cudaDevAttrMaxTexture2DLinearPit, sizeof(__pyx_k_cudaDevAttrMaxTexture2DLinearPit), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture2DLinearWid, __pyx_k_cudaDevAttrMaxTexture2DLinearWid, sizeof(__pyx_k_cudaDevAttrMaxTexture2DLinearWid), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture2DMipmapped, __pyx_k_cudaDevAttrMaxTexture2DMipmapped, sizeof(__pyx_k_cudaDevAttrMaxTexture2DMipmapped), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture2DMipmapped_2, __pyx_k_cudaDevAttrMaxTexture2DMipmapped_2, sizeof(__pyx_k_cudaDevAttrMaxTexture2DMipmapped_2), 0, 0, 1, 1}, + 
{&__pyx_n_s_cudaDevAttrMaxTexture2DWidth, __pyx_k_cudaDevAttrMaxTexture2DWidth, sizeof(__pyx_k_cudaDevAttrMaxTexture2DWidth), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture3DDepth, __pyx_k_cudaDevAttrMaxTexture3DDepth, sizeof(__pyx_k_cudaDevAttrMaxTexture3DDepth), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture3DDepthAlt, __pyx_k_cudaDevAttrMaxTexture3DDepthAlt, sizeof(__pyx_k_cudaDevAttrMaxTexture3DDepthAlt), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture3DHeight, __pyx_k_cudaDevAttrMaxTexture3DHeight, sizeof(__pyx_k_cudaDevAttrMaxTexture3DHeight), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture3DHeightAlt, __pyx_k_cudaDevAttrMaxTexture3DHeightAlt, sizeof(__pyx_k_cudaDevAttrMaxTexture3DHeightAlt), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture3DWidth, __pyx_k_cudaDevAttrMaxTexture3DWidth, sizeof(__pyx_k_cudaDevAttrMaxTexture3DWidth), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTexture3DWidthAlt, __pyx_k_cudaDevAttrMaxTexture3DWidthAlt, sizeof(__pyx_k_cudaDevAttrMaxTexture3DWidthAlt), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTextureCubemapLaye, __pyx_k_cudaDevAttrMaxTextureCubemapLaye, sizeof(__pyx_k_cudaDevAttrMaxTextureCubemapLaye), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTextureCubemapLaye_2, __pyx_k_cudaDevAttrMaxTextureCubemapLaye_2, sizeof(__pyx_k_cudaDevAttrMaxTextureCubemapLaye_2), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxTextureCubemapWidt, __pyx_k_cudaDevAttrMaxTextureCubemapWidt, sizeof(__pyx_k_cudaDevAttrMaxTextureCubemapWidt), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxThreadsPerBlock, __pyx_k_cudaDevAttrMaxThreadsPerBlock, sizeof(__pyx_k_cudaDevAttrMaxThreadsPerBlock), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMaxThreadsPerMultiPro, __pyx_k_cudaDevAttrMaxThreadsPerMultiPro, sizeof(__pyx_k_cudaDevAttrMaxThreadsPerMultiPro), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMemoryClockRate, __pyx_k_cudaDevAttrMemoryClockRate, sizeof(__pyx_k_cudaDevAttrMemoryClockRate), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMultiGpuBoardGroupID, 
__pyx_k_cudaDevAttrMultiGpuBoardGroupID, sizeof(__pyx_k_cudaDevAttrMultiGpuBoardGroupID), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrMultiProcessorCount, __pyx_k_cudaDevAttrMultiProcessorCount, sizeof(__pyx_k_cudaDevAttrMultiProcessorCount), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrPageableMemoryAccess, __pyx_k_cudaDevAttrPageableMemoryAccess, sizeof(__pyx_k_cudaDevAttrPageableMemoryAccess), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrPageableMemoryAccessU, __pyx_k_cudaDevAttrPageableMemoryAccessU, sizeof(__pyx_k_cudaDevAttrPageableMemoryAccessU), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrPciBusId, __pyx_k_cudaDevAttrPciBusId, sizeof(__pyx_k_cudaDevAttrPciBusId), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrPciDeviceId, __pyx_k_cudaDevAttrPciDeviceId, sizeof(__pyx_k_cudaDevAttrPciDeviceId), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrPciDomainId, __pyx_k_cudaDevAttrPciDomainId, sizeof(__pyx_k_cudaDevAttrPciDomainId), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrReserved92, __pyx_k_cudaDevAttrReserved92, sizeof(__pyx_k_cudaDevAttrReserved92), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrReserved93, __pyx_k_cudaDevAttrReserved93, sizeof(__pyx_k_cudaDevAttrReserved93), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrReserved94, __pyx_k_cudaDevAttrReserved94, sizeof(__pyx_k_cudaDevAttrReserved94), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrSingleToDoublePrecisi, __pyx_k_cudaDevAttrSingleToDoublePrecisi, sizeof(__pyx_k_cudaDevAttrSingleToDoublePrecisi), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrStreamPrioritiesSuppo, __pyx_k_cudaDevAttrStreamPrioritiesSuppo, sizeof(__pyx_k_cudaDevAttrStreamPrioritiesSuppo), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrSurfaceAlignment, __pyx_k_cudaDevAttrSurfaceAlignment, sizeof(__pyx_k_cudaDevAttrSurfaceAlignment), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrTccDriver, __pyx_k_cudaDevAttrTccDriver, sizeof(__pyx_k_cudaDevAttrTccDriver), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrTextureAlignment, __pyx_k_cudaDevAttrTextureAlignment, sizeof(__pyx_k_cudaDevAttrTextureAlignment), 0, 0, 1, 1}, + 
{&__pyx_n_s_cudaDevAttrTexturePitchAlignment, __pyx_k_cudaDevAttrTexturePitchAlignment, sizeof(__pyx_k_cudaDevAttrTexturePitchAlignment), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrTotalConstantMemory, __pyx_k_cudaDevAttrTotalConstantMemory, sizeof(__pyx_k_cudaDevAttrTotalConstantMemory), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrUnifiedAddressing, __pyx_k_cudaDevAttrUnifiedAddressing, sizeof(__pyx_k_cudaDevAttrUnifiedAddressing), 0, 0, 1, 1}, + {&__pyx_n_s_cudaDevAttrWarpSize, __pyx_k_cudaDevAttrWarpSize, sizeof(__pyx_k_cudaDevAttrWarpSize), 0, 0, 1, 1}, + {&__pyx_n_s_cudf__cuda_gpu, __pyx_k_cudf__cuda_gpu, sizeof(__pyx_k_cudf__cuda_gpu), 0, 0, 1, 1}, + {&__pyx_n_s_device, __pyx_k_device, sizeof(__pyx_k_device), 0, 0, 1, 1}, + {&__pyx_n_s_doc, __pyx_k_doc, sizeof(__pyx_k_doc), 0, 0, 1, 1}, + {&__pyx_n_s_enum, __pyx_k_enum, sizeof(__pyx_k_enum), 0, 0, 1, 1}, + {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, + {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_n_s_metaclass, __pyx_k_metaclass, sizeof(__pyx_k_metaclass), 0, 0, 1, 1}, + {&__pyx_n_s_module, __pyx_k_module, sizeof(__pyx_k_module), 0, 0, 1, 1}, + {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, + {&__pyx_n_s_prepare, __pyx_k_prepare, sizeof(__pyx_k_prepare), 0, 0, 1, 1}, + {&__pyx_n_s_qualname, __pyx_k_qualname, sizeof(__pyx_k_qualname), 0, 0, 1, 1}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {0, 0, 0, 0, 0, 0, 0} +}; +static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { + return 0; +} + +static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + __Pyx_RefNannyFinishContext(); + return 0; +} + +static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { + if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int 
__Pyx_modinit_global_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ + +static int __Pyx_modinit_global_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); + /*--- Global init code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); + /*--- Variable export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); + /*--- Function export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); + /*--- Type init code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); + /*--- Type import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); + /*--- Variable import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_import_code(void) { + 
__Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); + /*--- Function import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + + +#if PY_MAJOR_VERSION < 3 +#ifdef CYTHON_NO_PYINIT_EXPORT +#define __Pyx_PyMODINIT_FUNC void +#else +#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC +#endif +#else +#ifdef CYTHON_NO_PYINIT_EXPORT +#define __Pyx_PyMODINIT_FUNC PyObject * +#else +#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC +#endif +#endif + + +#if PY_MAJOR_VERSION < 3 +__Pyx_PyMODINIT_FUNC initgpu(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC initgpu(void) +#else +__Pyx_PyMODINIT_FUNC PyInit_gpu(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC PyInit_gpu(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { + #if PY_VERSION_HEX >= 0x030700A1 + static PY_INT64_T main_interpreter_id = -1; + PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); + if (main_interpreter_id == -1) { + main_interpreter_id = current_id; + return (unlikely(current_id == -1)) ? 
-1 : 0; + } else if (unlikely(main_interpreter_id != current_id)) + #else + static PyInterpreterState *main_interpreter = NULL; + PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; + if (!main_interpreter) { + main_interpreter = current_interpreter; + } else if (unlikely(main_interpreter != current_interpreter)) + #endif + { + PyErr_SetString( + PyExc_ImportError, + "Interpreter change detected - this module can only be loaded into one interpreter per process."); + return -1; + } + return 0; +} +static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + if (allow_none || value != Py_None) { + result = PyDict_SetItemString(moddict, to_name, value); + } + Py_DECREF(value); + } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} +static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + if (__Pyx_check_single_interpreter()) + return NULL; + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + 
return NULL; +} + + +static CYTHON_SMALL_CODE int __pyx_pymod_exec_gpu(PyObject *__pyx_pyinit_module) +#endif +#endif +{ + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + __Pyx_RefNannyDeclarations + #if CYTHON_PEP489_MULTI_PHASE_INIT + if (__pyx_m) { + if (__pyx_m == __pyx_pyinit_module) return 0; + PyErr_SetString(PyExc_RuntimeError, "Module 'gpu' has already been imported. Re-initialisation is not supported."); + return -1; + } + #elif PY_MAJOR_VERSION >= 3 + if (__pyx_m) return __Pyx_NewRef(__pyx_m); + #endif + #if CYTHON_REFNANNY +__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); +if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); +} +#endif + __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_gpu(void)", 0); + if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pxy_PyFrame_Initialize_Offsets + __Pxy_PyFrame_Initialize_Offsets(); + #endif + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pyx_CyFunction_USED + if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_FusedFunction_USED + if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Coroutine_USED + if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Generator_USED + if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef 
__Pyx_StopAsyncIteration_USED + if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + /*--- Library function declarations ---*/ + /*--- Threads initialization code ---*/ + #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS + #ifdef WITH_THREAD /* Python build with threading support? */ + PyEval_InitThreads(); + #endif + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("gpu", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + #endif + if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_d); + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_b); + __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_cython_runtime); + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + /*--- Initialize various global constants etc. 
---*/ + if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) + if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + if (__pyx_module_is_main_cudf___cuda__gpu) { + if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + } + #if PY_MAJOR_VERSION >= 3 + { + PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) + if (!PyDict_GetItemString(modules, "cudf._cuda.gpu")) { + if (unlikely(PyDict_SetItemString(modules, "cudf._cuda.gpu", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + } + } + #endif + /*--- Builtin init code ---*/ + if (__Pyx_InitCachedBuiltins() < 0) goto __pyx_L1_error; + /*--- Constants init code ---*/ + if (__Pyx_InitCachedConstants() < 0) goto __pyx_L1_error; + /*--- Global type/function init code ---*/ + (void)__Pyx_modinit_global_init_code(); + (void)__Pyx_modinit_variable_export_code(); + (void)__Pyx_modinit_function_export_code(); + (void)__Pyx_modinit_type_init_code(); + (void)__Pyx_modinit_type_import_code(); + (void)__Pyx_modinit_variable_import_code(); + (void)__Pyx_modinit_function_import_code(); + /*--- Execution code ---*/ + #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + + /* "cudf/_cuda/gpu.pyx":10 + * cudaDeviceAttr + * ) + * from enum import IntEnum # <<<<<<<<<<<<<< + * from cudf._cuda.gpu cimport underlying_type_attribute + * + */ + __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_n_s_IntEnum); + __Pyx_GIVEREF(__pyx_n_s_IntEnum); + PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_IntEnum); + __pyx_t_2 = __Pyx_Import(__pyx_n_s_enum, __pyx_t_1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_IntEnum); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_IntEnum, __pyx_t_1) < 0) __PYX_ERR(0, 10, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "cudf/_cuda/gpu.pyx":14 + * + * + * class CudaDeviceAttr(IntEnum): # <<<<<<<<<<<<<< + * cudaDevAttrMaxThreadsPerBlock = cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock + * cudaDevAttrMaxBlockDimX = cudaDeviceAttr.cudaDevAttrMaxBlockDimX + */ + __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_IntEnum); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_CalculateMetaclass(NULL, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_Py3MetaclassPrepare(__pyx_t_2, __pyx_t_1, __pyx_n_s_CudaDeviceAttr, __pyx_n_s_CudaDeviceAttr, (PyObject *) NULL, __pyx_n_s_cudf__cuda_gpu, (PyObject *) NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + + /* "cudf/_cuda/gpu.pyx":15 + * + * class CudaDeviceAttr(IntEnum): + * cudaDevAttrMaxThreadsPerBlock = cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock # <<<<<<<<<<<<<< + * cudaDevAttrMaxBlockDimX = cudaDeviceAttr.cudaDevAttrMaxBlockDimX + * cudaDevAttrMaxBlockDimY = cudaDeviceAttr.cudaDevAttrMaxBlockDimY + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxThreadsPerBlock)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxThreadsPerBlock, 
__pyx_t_4) < 0) __PYX_ERR(0, 15, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":16 + * class CudaDeviceAttr(IntEnum): + * cudaDevAttrMaxThreadsPerBlock = cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock + * cudaDevAttrMaxBlockDimX = cudaDeviceAttr.cudaDevAttrMaxBlockDimX # <<<<<<<<<<<<<< + * cudaDevAttrMaxBlockDimY = cudaDeviceAttr.cudaDevAttrMaxBlockDimY + * cudaDevAttrMaxBlockDimZ = cudaDeviceAttr.cudaDevAttrMaxBlockDimZ + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxBlockDimX)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxBlockDimX, __pyx_t_4) < 0) __PYX_ERR(0, 16, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":17 + * cudaDevAttrMaxThreadsPerBlock = cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock + * cudaDevAttrMaxBlockDimX = cudaDeviceAttr.cudaDevAttrMaxBlockDimX + * cudaDevAttrMaxBlockDimY = cudaDeviceAttr.cudaDevAttrMaxBlockDimY # <<<<<<<<<<<<<< + * cudaDevAttrMaxBlockDimZ = cudaDeviceAttr.cudaDevAttrMaxBlockDimZ + * cudaDevAttrMaxGridDimX = cudaDeviceAttr.cudaDevAttrMaxGridDimX + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxBlockDimY)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxBlockDimY, __pyx_t_4) < 0) __PYX_ERR(0, 17, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":18 + * cudaDevAttrMaxBlockDimX = cudaDeviceAttr.cudaDevAttrMaxBlockDimX + * cudaDevAttrMaxBlockDimY = cudaDeviceAttr.cudaDevAttrMaxBlockDimY + * cudaDevAttrMaxBlockDimZ = cudaDeviceAttr.cudaDevAttrMaxBlockDimZ # <<<<<<<<<<<<<< + * cudaDevAttrMaxGridDimX = cudaDeviceAttr.cudaDevAttrMaxGridDimX + * cudaDevAttrMaxGridDimY = cudaDeviceAttr.cudaDevAttrMaxGridDimY + 
*/ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxBlockDimZ)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxBlockDimZ, __pyx_t_4) < 0) __PYX_ERR(0, 18, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":19 + * cudaDevAttrMaxBlockDimY = cudaDeviceAttr.cudaDevAttrMaxBlockDimY + * cudaDevAttrMaxBlockDimZ = cudaDeviceAttr.cudaDevAttrMaxBlockDimZ + * cudaDevAttrMaxGridDimX = cudaDeviceAttr.cudaDevAttrMaxGridDimX # <<<<<<<<<<<<<< + * cudaDevAttrMaxGridDimY = cudaDeviceAttr.cudaDevAttrMaxGridDimY + * cudaDevAttrMaxGridDimZ = cudaDeviceAttr.cudaDevAttrMaxGridDimZ + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxGridDimX)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxGridDimX, __pyx_t_4) < 0) __PYX_ERR(0, 19, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":20 + * cudaDevAttrMaxBlockDimZ = cudaDeviceAttr.cudaDevAttrMaxBlockDimZ + * cudaDevAttrMaxGridDimX = cudaDeviceAttr.cudaDevAttrMaxGridDimX + * cudaDevAttrMaxGridDimY = cudaDeviceAttr.cudaDevAttrMaxGridDimY # <<<<<<<<<<<<<< + * cudaDevAttrMaxGridDimZ = cudaDeviceAttr.cudaDevAttrMaxGridDimZ + * cudaDevAttrMaxSharedMemoryPerBlock = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlock + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxGridDimY)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxGridDimY, __pyx_t_4) < 0) __PYX_ERR(0, 20, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":21 + * cudaDevAttrMaxGridDimX = 
cudaDeviceAttr.cudaDevAttrMaxGridDimX + * cudaDevAttrMaxGridDimY = cudaDeviceAttr.cudaDevAttrMaxGridDimY + * cudaDevAttrMaxGridDimZ = cudaDeviceAttr.cudaDevAttrMaxGridDimZ # <<<<<<<<<<<<<< + * cudaDevAttrMaxSharedMemoryPerBlock = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlock + * cudaDevAttrTotalConstantMemory = cudaDeviceAttr.cudaDevAttrTotalConstantMemory + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxGridDimZ)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxGridDimZ, __pyx_t_4) < 0) __PYX_ERR(0, 21, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":22 + * cudaDevAttrMaxGridDimY = cudaDeviceAttr.cudaDevAttrMaxGridDimY + * cudaDevAttrMaxGridDimZ = cudaDeviceAttr.cudaDevAttrMaxGridDimZ + * cudaDevAttrMaxSharedMemoryPerBlock = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlock # <<<<<<<<<<<<<< + * cudaDevAttrTotalConstantMemory = cudaDeviceAttr.cudaDevAttrTotalConstantMemory + * cudaDevAttrWarpSize = cudaDeviceAttr.cudaDevAttrWarpSize + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSharedMemoryPerBlock)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSharedMemoryPerBlo, __pyx_t_4) < 0) __PYX_ERR(0, 22, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":23 + * cudaDevAttrMaxGridDimZ = cudaDeviceAttr.cudaDevAttrMaxGridDimZ + * cudaDevAttrMaxSharedMemoryPerBlock = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlock + * cudaDevAttrTotalConstantMemory = cudaDeviceAttr.cudaDevAttrTotalConstantMemory # <<<<<<<<<<<<<< + * cudaDevAttrWarpSize = cudaDeviceAttr.cudaDevAttrWarpSize + * cudaDevAttrMaxPitch = cudaDeviceAttr.cudaDevAttrMaxPitch + */ + __pyx_t_4 = 
__Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrTotalConstantMemory)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrTotalConstantMemory, __pyx_t_4) < 0) __PYX_ERR(0, 23, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":24 + * cudaDevAttrMaxSharedMemoryPerBlock = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlock + * cudaDevAttrTotalConstantMemory = cudaDeviceAttr.cudaDevAttrTotalConstantMemory + * cudaDevAttrWarpSize = cudaDeviceAttr.cudaDevAttrWarpSize # <<<<<<<<<<<<<< + * cudaDevAttrMaxPitch = cudaDeviceAttr.cudaDevAttrMaxPitch + * cudaDevAttrMaxRegistersPerBlock = cudaDeviceAttr.cudaDevAttrMaxRegistersPerBlock + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrWarpSize)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrWarpSize, __pyx_t_4) < 0) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":25 + * cudaDevAttrTotalConstantMemory = cudaDeviceAttr.cudaDevAttrTotalConstantMemory + * cudaDevAttrWarpSize = cudaDeviceAttr.cudaDevAttrWarpSize + * cudaDevAttrMaxPitch = cudaDeviceAttr.cudaDevAttrMaxPitch # <<<<<<<<<<<<<< + * cudaDevAttrMaxRegistersPerBlock = cudaDeviceAttr.cudaDevAttrMaxRegistersPerBlock + * cudaDevAttrClockRate = cudaDeviceAttr.cudaDevAttrClockRate + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxPitch)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxPitch, __pyx_t_4) < 0) __PYX_ERR(0, 25, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":26 + * cudaDevAttrWarpSize = 
cudaDeviceAttr.cudaDevAttrWarpSize + * cudaDevAttrMaxPitch = cudaDeviceAttr.cudaDevAttrMaxPitch + * cudaDevAttrMaxRegistersPerBlock = cudaDeviceAttr.cudaDevAttrMaxRegistersPerBlock # <<<<<<<<<<<<<< + * cudaDevAttrClockRate = cudaDeviceAttr.cudaDevAttrClockRate + * cudaDevAttrTextureAlignment = cudaDeviceAttr.cudaDevAttrTextureAlignment + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxRegistersPerBlock)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxRegistersPerBlock, __pyx_t_4) < 0) __PYX_ERR(0, 26, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":27 + * cudaDevAttrMaxPitch = cudaDeviceAttr.cudaDevAttrMaxPitch + * cudaDevAttrMaxRegistersPerBlock = cudaDeviceAttr.cudaDevAttrMaxRegistersPerBlock + * cudaDevAttrClockRate = cudaDeviceAttr.cudaDevAttrClockRate # <<<<<<<<<<<<<< + * cudaDevAttrTextureAlignment = cudaDeviceAttr.cudaDevAttrTextureAlignment + * cudaDevAttrGpuOverlap = cudaDeviceAttr.cudaDevAttrGpuOverlap + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrClockRate)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 27, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrClockRate, __pyx_t_4) < 0) __PYX_ERR(0, 27, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":28 + * cudaDevAttrMaxRegistersPerBlock = cudaDeviceAttr.cudaDevAttrMaxRegistersPerBlock + * cudaDevAttrClockRate = cudaDeviceAttr.cudaDevAttrClockRate + * cudaDevAttrTextureAlignment = cudaDeviceAttr.cudaDevAttrTextureAlignment # <<<<<<<<<<<<<< + * cudaDevAttrGpuOverlap = cudaDeviceAttr.cudaDevAttrGpuOverlap + * cudaDevAttrMultiProcessorCount = cudaDeviceAttr.cudaDevAttrMultiProcessorCount + */ + __pyx_t_4 = 
__Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrTextureAlignment)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 28, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrTextureAlignment, __pyx_t_4) < 0) __PYX_ERR(0, 28, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":29 + * cudaDevAttrClockRate = cudaDeviceAttr.cudaDevAttrClockRate + * cudaDevAttrTextureAlignment = cudaDeviceAttr.cudaDevAttrTextureAlignment + * cudaDevAttrGpuOverlap = cudaDeviceAttr.cudaDevAttrGpuOverlap # <<<<<<<<<<<<<< + * cudaDevAttrMultiProcessorCount = cudaDeviceAttr.cudaDevAttrMultiProcessorCount + * cudaDevAttrKernelExecTimeout = cudaDeviceAttr.cudaDevAttrKernelExecTimeout + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrGpuOverlap)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrGpuOverlap, __pyx_t_4) < 0) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":30 + * cudaDevAttrTextureAlignment = cudaDeviceAttr.cudaDevAttrTextureAlignment + * cudaDevAttrGpuOverlap = cudaDeviceAttr.cudaDevAttrGpuOverlap + * cudaDevAttrMultiProcessorCount = cudaDeviceAttr.cudaDevAttrMultiProcessorCount # <<<<<<<<<<<<<< + * cudaDevAttrKernelExecTimeout = cudaDeviceAttr.cudaDevAttrKernelExecTimeout + * cudaDevAttrIntegrated = cudaDeviceAttr.cudaDevAttrIntegrated + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMultiProcessorCount)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 30, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMultiProcessorCount, __pyx_t_4) < 0) __PYX_ERR(0, 30, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":31 + * 
cudaDevAttrGpuOverlap = cudaDeviceAttr.cudaDevAttrGpuOverlap + * cudaDevAttrMultiProcessorCount = cudaDeviceAttr.cudaDevAttrMultiProcessorCount + * cudaDevAttrKernelExecTimeout = cudaDeviceAttr.cudaDevAttrKernelExecTimeout # <<<<<<<<<<<<<< + * cudaDevAttrIntegrated = cudaDeviceAttr.cudaDevAttrIntegrated + * cudaDevAttrCanMapHostMemory = cudaDeviceAttr.cudaDevAttrCanMapHostMemory + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrKernelExecTimeout)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 31, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrKernelExecTimeout, __pyx_t_4) < 0) __PYX_ERR(0, 31, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":32 + * cudaDevAttrMultiProcessorCount = cudaDeviceAttr.cudaDevAttrMultiProcessorCount + * cudaDevAttrKernelExecTimeout = cudaDeviceAttr.cudaDevAttrKernelExecTimeout + * cudaDevAttrIntegrated = cudaDeviceAttr.cudaDevAttrIntegrated # <<<<<<<<<<<<<< + * cudaDevAttrCanMapHostMemory = cudaDeviceAttr.cudaDevAttrCanMapHostMemory + * cudaDevAttrComputeMode = cudaDeviceAttr.cudaDevAttrComputeMode + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrIntegrated)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrIntegrated, __pyx_t_4) < 0) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":33 + * cudaDevAttrKernelExecTimeout = cudaDeviceAttr.cudaDevAttrKernelExecTimeout + * cudaDevAttrIntegrated = cudaDeviceAttr.cudaDevAttrIntegrated + * cudaDevAttrCanMapHostMemory = cudaDeviceAttr.cudaDevAttrCanMapHostMemory # <<<<<<<<<<<<<< + * cudaDevAttrComputeMode = cudaDeviceAttr.cudaDevAttrComputeMode + * cudaDevAttrMaxTexture1DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DWidth + */ + __pyx_t_4 = 
__Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrCanMapHostMemory)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 33, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrCanMapHostMemory, __pyx_t_4) < 0) __PYX_ERR(0, 33, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":34 + * cudaDevAttrIntegrated = cudaDeviceAttr.cudaDevAttrIntegrated + * cudaDevAttrCanMapHostMemory = cudaDeviceAttr.cudaDevAttrCanMapHostMemory + * cudaDevAttrComputeMode = cudaDeviceAttr.cudaDevAttrComputeMode # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture1DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DWidth + * cudaDevAttrMaxTexture2DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DWidth + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrComputeMode)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 34, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrComputeMode, __pyx_t_4) < 0) __PYX_ERR(0, 34, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":35 + * cudaDevAttrCanMapHostMemory = cudaDeviceAttr.cudaDevAttrCanMapHostMemory + * cudaDevAttrComputeMode = cudaDeviceAttr.cudaDevAttrComputeMode + * cudaDevAttrMaxTexture1DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DWidth # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture2DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DWidth + * cudaDevAttrMaxTexture2DHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DHeight + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture1DWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 35, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture1DWidth, __pyx_t_4) < 0) __PYX_ERR(0, 35, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":36 + * 
cudaDevAttrComputeMode = cudaDeviceAttr.cudaDevAttrComputeMode + * cudaDevAttrMaxTexture1DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DWidth + * cudaDevAttrMaxTexture2DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DWidth # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture2DHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DHeight + * cudaDevAttrMaxTexture3DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture3DWidth + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 36, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DWidth, __pyx_t_4) < 0) __PYX_ERR(0, 36, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":37 + * cudaDevAttrMaxTexture1DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DWidth + * cudaDevAttrMaxTexture2DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DWidth + * cudaDevAttrMaxTexture2DHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DHeight # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture3DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture3DWidth + * cudaDevAttrMaxTexture3DHeight = cudaDeviceAttr.cudaDevAttrMaxTexture3DHeight + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DHeight)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 37, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DHeight, __pyx_t_4) < 0) __PYX_ERR(0, 37, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":38 + * cudaDevAttrMaxTexture2DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DWidth + * cudaDevAttrMaxTexture2DHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DHeight + * cudaDevAttrMaxTexture3DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture3DWidth # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture3DHeight = cudaDeviceAttr.cudaDevAttrMaxTexture3DHeight + * 
cudaDevAttrMaxTexture3DDepth = cudaDeviceAttr.cudaDevAttrMaxTexture3DDepth + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture3DWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 38, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture3DWidth, __pyx_t_4) < 0) __PYX_ERR(0, 38, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":39 + * cudaDevAttrMaxTexture2DHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DHeight + * cudaDevAttrMaxTexture3DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture3DWidth + * cudaDevAttrMaxTexture3DHeight = cudaDeviceAttr.cudaDevAttrMaxTexture3DHeight # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture3DDepth = cudaDeviceAttr.cudaDevAttrMaxTexture3DDepth + * cudaDevAttrMaxTexture2DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredWidth + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture3DHeight)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 39, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture3DHeight, __pyx_t_4) < 0) __PYX_ERR(0, 39, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":40 + * cudaDevAttrMaxTexture3DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture3DWidth + * cudaDevAttrMaxTexture3DHeight = cudaDeviceAttr.cudaDevAttrMaxTexture3DHeight + * cudaDevAttrMaxTexture3DDepth = cudaDeviceAttr.cudaDevAttrMaxTexture3DDepth # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture2DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredWidth + * cudaDevAttrMaxTexture2DLayeredHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredHeight + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture3DDepth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 40, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if 
(__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture3DDepth, __pyx_t_4) < 0) __PYX_ERR(0, 40, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":41 + * cudaDevAttrMaxTexture3DHeight = cudaDeviceAttr.cudaDevAttrMaxTexture3DHeight + * cudaDevAttrMaxTexture3DDepth = cudaDeviceAttr.cudaDevAttrMaxTexture3DDepth + * cudaDevAttrMaxTexture2DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredWidth # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture2DLayeredHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredHeight + * cudaDevAttrMaxTexture2DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredLayers + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DLayeredWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 41, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DLayeredWi, __pyx_t_4) < 0) __PYX_ERR(0, 41, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":42 + * cudaDevAttrMaxTexture3DDepth = cudaDeviceAttr.cudaDevAttrMaxTexture3DDepth + * cudaDevAttrMaxTexture2DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredWidth + * cudaDevAttrMaxTexture2DLayeredHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredHeight # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture2DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredLayers + * cudaDevAttrSurfaceAlignment = cudaDeviceAttr.cudaDevAttrSurfaceAlignment + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DLayeredHeight)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 42, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DLayeredHe, __pyx_t_4) < 0) __PYX_ERR(0, 42, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":43 + * cudaDevAttrMaxTexture2DLayeredWidth = 
cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredWidth + * cudaDevAttrMaxTexture2DLayeredHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredHeight + * cudaDevAttrMaxTexture2DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredLayers # <<<<<<<<<<<<<< + * cudaDevAttrSurfaceAlignment = cudaDeviceAttr.cudaDevAttrSurfaceAlignment + * cudaDevAttrConcurrentKernels = cudaDeviceAttr.cudaDevAttrConcurrentKernels + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DLayeredLayers)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 43, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DLayeredLa, __pyx_t_4) < 0) __PYX_ERR(0, 43, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":44 + * cudaDevAttrMaxTexture2DLayeredHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredHeight + * cudaDevAttrMaxTexture2DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredLayers + * cudaDevAttrSurfaceAlignment = cudaDeviceAttr.cudaDevAttrSurfaceAlignment # <<<<<<<<<<<<<< + * cudaDevAttrConcurrentKernels = cudaDeviceAttr.cudaDevAttrConcurrentKernels + * cudaDevAttrEccEnabled = cudaDeviceAttr.cudaDevAttrEccEnabled + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrSurfaceAlignment)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 44, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrSurfaceAlignment, __pyx_t_4) < 0) __PYX_ERR(0, 44, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":45 + * cudaDevAttrMaxTexture2DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredLayers + * cudaDevAttrSurfaceAlignment = cudaDeviceAttr.cudaDevAttrSurfaceAlignment + * cudaDevAttrConcurrentKernels = cudaDeviceAttr.cudaDevAttrConcurrentKernels # <<<<<<<<<<<<<< + * cudaDevAttrEccEnabled = 
cudaDeviceAttr.cudaDevAttrEccEnabled + * cudaDevAttrPciBusId = cudaDeviceAttr.cudaDevAttrPciBusId + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrConcurrentKernels)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 45, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrConcurrentKernels, __pyx_t_4) < 0) __PYX_ERR(0, 45, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":46 + * cudaDevAttrSurfaceAlignment = cudaDeviceAttr.cudaDevAttrSurfaceAlignment + * cudaDevAttrConcurrentKernels = cudaDeviceAttr.cudaDevAttrConcurrentKernels + * cudaDevAttrEccEnabled = cudaDeviceAttr.cudaDevAttrEccEnabled # <<<<<<<<<<<<<< + * cudaDevAttrPciBusId = cudaDeviceAttr.cudaDevAttrPciBusId + * cudaDevAttrPciDeviceId = cudaDeviceAttr.cudaDevAttrPciDeviceId + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrEccEnabled)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 46, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrEccEnabled, __pyx_t_4) < 0) __PYX_ERR(0, 46, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":47 + * cudaDevAttrConcurrentKernels = cudaDeviceAttr.cudaDevAttrConcurrentKernels + * cudaDevAttrEccEnabled = cudaDeviceAttr.cudaDevAttrEccEnabled + * cudaDevAttrPciBusId = cudaDeviceAttr.cudaDevAttrPciBusId # <<<<<<<<<<<<<< + * cudaDevAttrPciDeviceId = cudaDeviceAttr.cudaDevAttrPciDeviceId + * cudaDevAttrTccDriver = cudaDeviceAttr.cudaDevAttrTccDriver + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrPciBusId)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 47, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrPciBusId, __pyx_t_4) < 0) __PYX_ERR(0, 47, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* 
"cudf/_cuda/gpu.pyx":48 + * cudaDevAttrEccEnabled = cudaDeviceAttr.cudaDevAttrEccEnabled + * cudaDevAttrPciBusId = cudaDeviceAttr.cudaDevAttrPciBusId + * cudaDevAttrPciDeviceId = cudaDeviceAttr.cudaDevAttrPciDeviceId # <<<<<<<<<<<<<< + * cudaDevAttrTccDriver = cudaDeviceAttr.cudaDevAttrTccDriver + * cudaDevAttrMemoryClockRate = cudaDeviceAttr.cudaDevAttrMemoryClockRate + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrPciDeviceId)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 48, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrPciDeviceId, __pyx_t_4) < 0) __PYX_ERR(0, 48, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":49 + * cudaDevAttrPciBusId = cudaDeviceAttr.cudaDevAttrPciBusId + * cudaDevAttrPciDeviceId = cudaDeviceAttr.cudaDevAttrPciDeviceId + * cudaDevAttrTccDriver = cudaDeviceAttr.cudaDevAttrTccDriver # <<<<<<<<<<<<<< + * cudaDevAttrMemoryClockRate = cudaDeviceAttr.cudaDevAttrMemoryClockRate + * cudaDevAttrGlobalMemoryBusWidth = cudaDeviceAttr.cudaDevAttrGlobalMemoryBusWidth + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrTccDriver)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 49, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrTccDriver, __pyx_t_4) < 0) __PYX_ERR(0, 49, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":50 + * cudaDevAttrPciDeviceId = cudaDeviceAttr.cudaDevAttrPciDeviceId + * cudaDevAttrTccDriver = cudaDeviceAttr.cudaDevAttrTccDriver + * cudaDevAttrMemoryClockRate = cudaDeviceAttr.cudaDevAttrMemoryClockRate # <<<<<<<<<<<<<< + * cudaDevAttrGlobalMemoryBusWidth = cudaDeviceAttr.cudaDevAttrGlobalMemoryBusWidth + * cudaDevAttrL2CacheSize = cudaDeviceAttr.cudaDevAttrL2CacheSize + */ + __pyx_t_4 = 
__Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMemoryClockRate)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 50, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMemoryClockRate, __pyx_t_4) < 0) __PYX_ERR(0, 50, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":51 + * cudaDevAttrTccDriver = cudaDeviceAttr.cudaDevAttrTccDriver + * cudaDevAttrMemoryClockRate = cudaDeviceAttr.cudaDevAttrMemoryClockRate + * cudaDevAttrGlobalMemoryBusWidth = cudaDeviceAttr.cudaDevAttrGlobalMemoryBusWidth # <<<<<<<<<<<<<< + * cudaDevAttrL2CacheSize = cudaDeviceAttr.cudaDevAttrL2CacheSize + * cudaDevAttrMaxThreadsPerMultiProcessor = cudaDeviceAttr.cudaDevAttrMaxThreadsPerMultiProcessor + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrGlobalMemoryBusWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 51, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrGlobalMemoryBusWidth, __pyx_t_4) < 0) __PYX_ERR(0, 51, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":52 + * cudaDevAttrMemoryClockRate = cudaDeviceAttr.cudaDevAttrMemoryClockRate + * cudaDevAttrGlobalMemoryBusWidth = cudaDeviceAttr.cudaDevAttrGlobalMemoryBusWidth + * cudaDevAttrL2CacheSize = cudaDeviceAttr.cudaDevAttrL2CacheSize # <<<<<<<<<<<<<< + * cudaDevAttrMaxThreadsPerMultiProcessor = cudaDeviceAttr.cudaDevAttrMaxThreadsPerMultiProcessor + * cudaDevAttrAsyncEngineCount = cudaDeviceAttr.cudaDevAttrAsyncEngineCount + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrL2CacheSize)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 52, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrL2CacheSize, __pyx_t_4) < 0) __PYX_ERR(0, 52, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 
0; + + /* "cudf/_cuda/gpu.pyx":53 + * cudaDevAttrGlobalMemoryBusWidth = cudaDeviceAttr.cudaDevAttrGlobalMemoryBusWidth + * cudaDevAttrL2CacheSize = cudaDeviceAttr.cudaDevAttrL2CacheSize + * cudaDevAttrMaxThreadsPerMultiProcessor = cudaDeviceAttr.cudaDevAttrMaxThreadsPerMultiProcessor # <<<<<<<<<<<<<< + * cudaDevAttrAsyncEngineCount = cudaDeviceAttr.cudaDevAttrAsyncEngineCount + * cudaDevAttrUnifiedAddressing = cudaDeviceAttr.cudaDevAttrUnifiedAddressing + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxThreadsPerMultiProcessor)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 53, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxThreadsPerMultiPro, __pyx_t_4) < 0) __PYX_ERR(0, 53, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":54 + * cudaDevAttrL2CacheSize = cudaDeviceAttr.cudaDevAttrL2CacheSize + * cudaDevAttrMaxThreadsPerMultiProcessor = cudaDeviceAttr.cudaDevAttrMaxThreadsPerMultiProcessor + * cudaDevAttrAsyncEngineCount = cudaDeviceAttr.cudaDevAttrAsyncEngineCount # <<<<<<<<<<<<<< + * cudaDevAttrUnifiedAddressing = cudaDeviceAttr.cudaDevAttrUnifiedAddressing + * cudaDevAttrMaxTexture1DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredWidth + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrAsyncEngineCount)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 54, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrAsyncEngineCount, __pyx_t_4) < 0) __PYX_ERR(0, 54, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":55 + * cudaDevAttrMaxThreadsPerMultiProcessor = cudaDeviceAttr.cudaDevAttrMaxThreadsPerMultiProcessor + * cudaDevAttrAsyncEngineCount = cudaDeviceAttr.cudaDevAttrAsyncEngineCount + * cudaDevAttrUnifiedAddressing = cudaDeviceAttr.cudaDevAttrUnifiedAddressing # <<<<<<<<<<<<<< + 
* cudaDevAttrMaxTexture1DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredWidth + * cudaDevAttrMaxTexture1DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredLayers + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrUnifiedAddressing)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 55, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrUnifiedAddressing, __pyx_t_4) < 0) __PYX_ERR(0, 55, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":56 + * cudaDevAttrAsyncEngineCount = cudaDeviceAttr.cudaDevAttrAsyncEngineCount + * cudaDevAttrUnifiedAddressing = cudaDeviceAttr.cudaDevAttrUnifiedAddressing + * cudaDevAttrMaxTexture1DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredWidth # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture1DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredLayers + * cudaDevAttrMaxTexture2DGatherWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherWidth + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture1DLayeredWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 56, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture1DLayeredWi, __pyx_t_4) < 0) __PYX_ERR(0, 56, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":57 + * cudaDevAttrUnifiedAddressing = cudaDeviceAttr.cudaDevAttrUnifiedAddressing + * cudaDevAttrMaxTexture1DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredWidth + * cudaDevAttrMaxTexture1DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredLayers # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture2DGatherWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherWidth + * cudaDevAttrMaxTexture2DGatherHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherHeight + */ + __pyx_t_4 = 
__Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture1DLayeredLayers)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 57, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture1DLayeredLa, __pyx_t_4) < 0) __PYX_ERR(0, 57, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":58 + * cudaDevAttrMaxTexture1DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredWidth + * cudaDevAttrMaxTexture1DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredLayers + * cudaDevAttrMaxTexture2DGatherWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherWidth # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture2DGatherHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherHeight + * cudaDevAttrMaxTexture3DWidthAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DWidthAlt + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DGatherWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 58, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DGatherWid, __pyx_t_4) < 0) __PYX_ERR(0, 58, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":59 + * cudaDevAttrMaxTexture1DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredLayers + * cudaDevAttrMaxTexture2DGatherWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherWidth + * cudaDevAttrMaxTexture2DGatherHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherHeight # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture3DWidthAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DWidthAlt + * cudaDevAttrMaxTexture3DHeightAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DHeightAlt + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DGatherHeight)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 59, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if 
(__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DGatherHei, __pyx_t_4) < 0) __PYX_ERR(0, 59, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":60 + * cudaDevAttrMaxTexture2DGatherWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherWidth + * cudaDevAttrMaxTexture2DGatherHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherHeight + * cudaDevAttrMaxTexture3DWidthAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DWidthAlt # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture3DHeightAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DHeightAlt + * cudaDevAttrMaxTexture3DDepthAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DDepthAlt + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture3DWidthAlt)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 60, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture3DWidthAlt, __pyx_t_4) < 0) __PYX_ERR(0, 60, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":61 + * cudaDevAttrMaxTexture2DGatherHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherHeight + * cudaDevAttrMaxTexture3DWidthAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DWidthAlt + * cudaDevAttrMaxTexture3DHeightAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DHeightAlt # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture3DDepthAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DDepthAlt + * cudaDevAttrPciDomainId = cudaDeviceAttr.cudaDevAttrPciDomainId + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture3DHeightAlt)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 61, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture3DHeightAlt, __pyx_t_4) < 0) __PYX_ERR(0, 61, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":62 + * cudaDevAttrMaxTexture3DWidthAlt = 
cudaDeviceAttr.cudaDevAttrMaxTexture3DWidthAlt + * cudaDevAttrMaxTexture3DHeightAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DHeightAlt + * cudaDevAttrMaxTexture3DDepthAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DDepthAlt # <<<<<<<<<<<<<< + * cudaDevAttrPciDomainId = cudaDeviceAttr.cudaDevAttrPciDomainId + * cudaDevAttrTexturePitchAlignment = cudaDeviceAttr.cudaDevAttrTexturePitchAlignment + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture3DDepthAlt)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 62, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture3DDepthAlt, __pyx_t_4) < 0) __PYX_ERR(0, 62, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":63 + * cudaDevAttrMaxTexture3DHeightAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DHeightAlt + * cudaDevAttrMaxTexture3DDepthAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DDepthAlt + * cudaDevAttrPciDomainId = cudaDeviceAttr.cudaDevAttrPciDomainId # <<<<<<<<<<<<<< + * cudaDevAttrTexturePitchAlignment = cudaDeviceAttr.cudaDevAttrTexturePitchAlignment + * cudaDevAttrMaxTextureCubemapWidth = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapWidth + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrPciDomainId)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 63, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrPciDomainId, __pyx_t_4) < 0) __PYX_ERR(0, 63, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":64 + * cudaDevAttrMaxTexture3DDepthAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DDepthAlt + * cudaDevAttrPciDomainId = cudaDeviceAttr.cudaDevAttrPciDomainId + * cudaDevAttrTexturePitchAlignment = cudaDeviceAttr.cudaDevAttrTexturePitchAlignment # <<<<<<<<<<<<<< + * cudaDevAttrMaxTextureCubemapWidth = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapWidth + * 
cudaDevAttrMaxTextureCubemapLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredWidth + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrTexturePitchAlignment)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 64, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrTexturePitchAlignment, __pyx_t_4) < 0) __PYX_ERR(0, 64, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":65 + * cudaDevAttrPciDomainId = cudaDeviceAttr.cudaDevAttrPciDomainId + * cudaDevAttrTexturePitchAlignment = cudaDeviceAttr.cudaDevAttrTexturePitchAlignment + * cudaDevAttrMaxTextureCubemapWidth = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapWidth # <<<<<<<<<<<<<< + * cudaDevAttrMaxTextureCubemapLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredWidth + * cudaDevAttrMaxTextureCubemapLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredLayers + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTextureCubemapWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 65, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTextureCubemapWidt, __pyx_t_4) < 0) __PYX_ERR(0, 65, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":66 + * cudaDevAttrTexturePitchAlignment = cudaDeviceAttr.cudaDevAttrTexturePitchAlignment + * cudaDevAttrMaxTextureCubemapWidth = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapWidth + * cudaDevAttrMaxTextureCubemapLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredWidth # <<<<<<<<<<<<<< + * cudaDevAttrMaxTextureCubemapLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredLayers + * cudaDevAttrMaxSurface1DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface1DWidth + */ + __pyx_t_4 = 
__Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTextureCubemapLayeredWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 66, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTextureCubemapLaye, __pyx_t_4) < 0) __PYX_ERR(0, 66, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":67 + * cudaDevAttrMaxTextureCubemapWidth = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapWidth + * cudaDevAttrMaxTextureCubemapLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredWidth + * cudaDevAttrMaxTextureCubemapLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredLayers # <<<<<<<<<<<<<< + * cudaDevAttrMaxSurface1DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface1DWidth + * cudaDevAttrMaxSurface2DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface2DWidth + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTextureCubemapLayeredLayers)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 67, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTextureCubemapLaye_2, __pyx_t_4) < 0) __PYX_ERR(0, 67, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":68 + * cudaDevAttrMaxTextureCubemapLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredWidth + * cudaDevAttrMaxTextureCubemapLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredLayers + * cudaDevAttrMaxSurface1DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface1DWidth # <<<<<<<<<<<<<< + * cudaDevAttrMaxSurface2DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface2DWidth + * cudaDevAttrMaxSurface2DHeight = cudaDeviceAttr.cudaDevAttrMaxSurface2DHeight + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface1DWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 68, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if 
(__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface1DWidth, __pyx_t_4) < 0) __PYX_ERR(0, 68, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":69 + * cudaDevAttrMaxTextureCubemapLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredLayers + * cudaDevAttrMaxSurface1DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface1DWidth + * cudaDevAttrMaxSurface2DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface2DWidth # <<<<<<<<<<<<<< + * cudaDevAttrMaxSurface2DHeight = cudaDeviceAttr.cudaDevAttrMaxSurface2DHeight + * cudaDevAttrMaxSurface3DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface3DWidth + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface2DWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 69, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface2DWidth, __pyx_t_4) < 0) __PYX_ERR(0, 69, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":70 + * cudaDevAttrMaxSurface1DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface1DWidth + * cudaDevAttrMaxSurface2DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface2DWidth + * cudaDevAttrMaxSurface2DHeight = cudaDeviceAttr.cudaDevAttrMaxSurface2DHeight # <<<<<<<<<<<<<< + * cudaDevAttrMaxSurface3DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface3DWidth + * cudaDevAttrMaxSurface3DHeight = cudaDeviceAttr.cudaDevAttrMaxSurface3DHeight + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface2DHeight)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 70, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface2DHeight, __pyx_t_4) < 0) __PYX_ERR(0, 70, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":71 + * cudaDevAttrMaxSurface2DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface2DWidth + * cudaDevAttrMaxSurface2DHeight = 
cudaDeviceAttr.cudaDevAttrMaxSurface2DHeight + * cudaDevAttrMaxSurface3DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface3DWidth # <<<<<<<<<<<<<< + * cudaDevAttrMaxSurface3DHeight = cudaDeviceAttr.cudaDevAttrMaxSurface3DHeight + * cudaDevAttrMaxSurface3DDepth = cudaDeviceAttr.cudaDevAttrMaxSurface3DDepth + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface3DWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 71, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface3DWidth, __pyx_t_4) < 0) __PYX_ERR(0, 71, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":72 + * cudaDevAttrMaxSurface2DHeight = cudaDeviceAttr.cudaDevAttrMaxSurface2DHeight + * cudaDevAttrMaxSurface3DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface3DWidth + * cudaDevAttrMaxSurface3DHeight = cudaDeviceAttr.cudaDevAttrMaxSurface3DHeight # <<<<<<<<<<<<<< + * cudaDevAttrMaxSurface3DDepth = cudaDeviceAttr.cudaDevAttrMaxSurface3DDepth + * cudaDevAttrMaxSurface1DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredWidth + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface3DHeight)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 72, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface3DHeight, __pyx_t_4) < 0) __PYX_ERR(0, 72, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":73 + * cudaDevAttrMaxSurface3DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface3DWidth + * cudaDevAttrMaxSurface3DHeight = cudaDeviceAttr.cudaDevAttrMaxSurface3DHeight + * cudaDevAttrMaxSurface3DDepth = cudaDeviceAttr.cudaDevAttrMaxSurface3DDepth # <<<<<<<<<<<<<< + * cudaDevAttrMaxSurface1DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredWidth + * cudaDevAttrMaxSurface1DLayeredLayers = 
cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredLayers + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface3DDepth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 73, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface3DDepth, __pyx_t_4) < 0) __PYX_ERR(0, 73, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":74 + * cudaDevAttrMaxSurface3DHeight = cudaDeviceAttr.cudaDevAttrMaxSurface3DHeight + * cudaDevAttrMaxSurface3DDepth = cudaDeviceAttr.cudaDevAttrMaxSurface3DDepth + * cudaDevAttrMaxSurface1DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredWidth # <<<<<<<<<<<<<< + * cudaDevAttrMaxSurface1DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredLayers + * cudaDevAttrMaxSurface2DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredWidth + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface1DLayeredWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 74, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface1DLayeredWi, __pyx_t_4) < 0) __PYX_ERR(0, 74, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":75 + * cudaDevAttrMaxSurface3DDepth = cudaDeviceAttr.cudaDevAttrMaxSurface3DDepth + * cudaDevAttrMaxSurface1DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredWidth + * cudaDevAttrMaxSurface1DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredLayers # <<<<<<<<<<<<<< + * cudaDevAttrMaxSurface2DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredWidth + * cudaDevAttrMaxSurface2DLayeredHeight = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredHeight + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface1DLayeredLayers)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 75, 
__pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface1DLayeredLa, __pyx_t_4) < 0) __PYX_ERR(0, 75, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":76 + * cudaDevAttrMaxSurface1DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredWidth + * cudaDevAttrMaxSurface1DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredLayers + * cudaDevAttrMaxSurface2DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredWidth # <<<<<<<<<<<<<< + * cudaDevAttrMaxSurface2DLayeredHeight = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredHeight + * cudaDevAttrMaxSurface2DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredLayers + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface2DLayeredWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 76, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface2DLayeredWi, __pyx_t_4) < 0) __PYX_ERR(0, 76, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":77 + * cudaDevAttrMaxSurface1DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredLayers + * cudaDevAttrMaxSurface2DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredWidth + * cudaDevAttrMaxSurface2DLayeredHeight = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredHeight # <<<<<<<<<<<<<< + * cudaDevAttrMaxSurface2DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredLayers + * cudaDevAttrMaxSurfaceCubemapWidth = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapWidth + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface2DLayeredHeight)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 77, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface2DLayeredHe, __pyx_t_4) < 0) __PYX_ERR(0, 77, __pyx_L1_error) + 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":78 + * cudaDevAttrMaxSurface2DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredWidth + * cudaDevAttrMaxSurface2DLayeredHeight = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredHeight + * cudaDevAttrMaxSurface2DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredLayers # <<<<<<<<<<<<<< + * cudaDevAttrMaxSurfaceCubemapWidth = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapWidth + * cudaDevAttrMaxSurfaceCubemapLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredWidth + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface2DLayeredLayers)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 78, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface2DLayeredLa, __pyx_t_4) < 0) __PYX_ERR(0, 78, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":79 + * cudaDevAttrMaxSurface2DLayeredHeight = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredHeight + * cudaDevAttrMaxSurface2DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredLayers + * cudaDevAttrMaxSurfaceCubemapWidth = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapWidth # <<<<<<<<<<<<<< + * cudaDevAttrMaxSurfaceCubemapLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredWidth + * cudaDevAttrMaxSurfaceCubemapLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredLayers + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurfaceCubemapWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 79, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurfaceCubemapWidt, __pyx_t_4) < 0) __PYX_ERR(0, 79, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":80 + * cudaDevAttrMaxSurface2DLayeredLayers = 
cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredLayers + * cudaDevAttrMaxSurfaceCubemapWidth = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapWidth + * cudaDevAttrMaxSurfaceCubemapLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredWidth # <<<<<<<<<<<<<< + * cudaDevAttrMaxSurfaceCubemapLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredLayers + * cudaDevAttrMaxTexture1DLinearWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DLinearWidth + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurfaceCubemapLayeredWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 80, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurfaceCubemapLaye, __pyx_t_4) < 0) __PYX_ERR(0, 80, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":81 + * cudaDevAttrMaxSurfaceCubemapWidth = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapWidth + * cudaDevAttrMaxSurfaceCubemapLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredWidth + * cudaDevAttrMaxSurfaceCubemapLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredLayers # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture1DLinearWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DLinearWidth + * cudaDevAttrMaxTexture2DLinearWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearWidth + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurfaceCubemapLayeredLayers)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 81, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurfaceCubemapLaye_2, __pyx_t_4) < 0) __PYX_ERR(0, 81, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":82 + * cudaDevAttrMaxSurfaceCubemapLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredWidth + * cudaDevAttrMaxSurfaceCubemapLayeredLayers = 
cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredLayers + * cudaDevAttrMaxTexture1DLinearWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DLinearWidth # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture2DLinearWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearWidth + * cudaDevAttrMaxTexture2DLinearHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearHeight + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture1DLinearWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 82, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture1DLinearWid, __pyx_t_4) < 0) __PYX_ERR(0, 82, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":83 + * cudaDevAttrMaxSurfaceCubemapLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredLayers + * cudaDevAttrMaxTexture1DLinearWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DLinearWidth + * cudaDevAttrMaxTexture2DLinearWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearWidth # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture2DLinearHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearHeight + * cudaDevAttrMaxTexture2DLinearPitch = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearPitch + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DLinearWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DLinearWid, __pyx_t_4) < 0) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":84 + * cudaDevAttrMaxTexture1DLinearWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DLinearWidth + * cudaDevAttrMaxTexture2DLinearWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearWidth + * cudaDevAttrMaxTexture2DLinearHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearHeight # <<<<<<<<<<<<<< + * 
cudaDevAttrMaxTexture2DLinearPitch = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearPitch + * cudaDevAttrMaxTexture2DMipmappedWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedWidth + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DLinearHeight)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 84, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DLinearHei, __pyx_t_4) < 0) __PYX_ERR(0, 84, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":85 + * cudaDevAttrMaxTexture2DLinearWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearWidth + * cudaDevAttrMaxTexture2DLinearHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearHeight + * cudaDevAttrMaxTexture2DLinearPitch = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearPitch # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture2DMipmappedWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedWidth + * cudaDevAttrMaxTexture2DMipmappedHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedHeight + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DLinearPitch)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 85, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DLinearPit, __pyx_t_4) < 0) __PYX_ERR(0, 85, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":86 + * cudaDevAttrMaxTexture2DLinearHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearHeight + * cudaDevAttrMaxTexture2DLinearPitch = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearPitch + * cudaDevAttrMaxTexture2DMipmappedWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedWidth # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture2DMipmappedHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedHeight + * cudaDevAttrComputeCapabilityMajor = 
cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DMipmappedWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 86, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DMipmapped, __pyx_t_4) < 0) __PYX_ERR(0, 86, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":87 + * cudaDevAttrMaxTexture2DLinearPitch = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearPitch + * cudaDevAttrMaxTexture2DMipmappedWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedWidth + * cudaDevAttrMaxTexture2DMipmappedHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedHeight # <<<<<<<<<<<<<< + * cudaDevAttrComputeCapabilityMajor = cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor + * cudaDevAttrComputeCapabilityMinor = cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DMipmappedHeight)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 87, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DMipmapped_2, __pyx_t_4) < 0) __PYX_ERR(0, 87, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":88 + * cudaDevAttrMaxTexture2DMipmappedWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedWidth + * cudaDevAttrMaxTexture2DMipmappedHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedHeight + * cudaDevAttrComputeCapabilityMajor = cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor # <<<<<<<<<<<<<< + * cudaDevAttrComputeCapabilityMinor = cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor + * cudaDevAttrMaxTexture1DMipmappedWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DMipmappedWidth + */ + __pyx_t_4 = 
__Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrComputeCapabilityMajor)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 88, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrComputeCapabilityMajo, __pyx_t_4) < 0) __PYX_ERR(0, 88, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":89 + * cudaDevAttrMaxTexture2DMipmappedHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedHeight + * cudaDevAttrComputeCapabilityMajor = cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor + * cudaDevAttrComputeCapabilityMinor = cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor # <<<<<<<<<<<<<< + * cudaDevAttrMaxTexture1DMipmappedWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DMipmappedWidth + * cudaDevAttrStreamPrioritiesSupported = cudaDeviceAttr.cudaDevAttrStreamPrioritiesSupported + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrComputeCapabilityMinor)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 89, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrComputeCapabilityMino, __pyx_t_4) < 0) __PYX_ERR(0, 89, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":90 + * cudaDevAttrComputeCapabilityMajor = cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor + * cudaDevAttrComputeCapabilityMinor = cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor + * cudaDevAttrMaxTexture1DMipmappedWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DMipmappedWidth # <<<<<<<<<<<<<< + * cudaDevAttrStreamPrioritiesSupported = cudaDeviceAttr.cudaDevAttrStreamPrioritiesSupported + * cudaDevAttrGlobalL1CacheSupported = cudaDeviceAttr.cudaDevAttrGlobalL1CacheSupported + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture1DMipmappedWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 90, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture1DMipmapped, __pyx_t_4) < 0) __PYX_ERR(0, 90, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":91 + * cudaDevAttrComputeCapabilityMinor = cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor + * cudaDevAttrMaxTexture1DMipmappedWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DMipmappedWidth + * cudaDevAttrStreamPrioritiesSupported = cudaDeviceAttr.cudaDevAttrStreamPrioritiesSupported # <<<<<<<<<<<<<< + * cudaDevAttrGlobalL1CacheSupported = cudaDeviceAttr.cudaDevAttrGlobalL1CacheSupported + * cudaDevAttrLocalL1CacheSupported = cudaDeviceAttr.cudaDevAttrLocalL1CacheSupported + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrStreamPrioritiesSupported)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 91, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrStreamPrioritiesSuppo, __pyx_t_4) < 0) __PYX_ERR(0, 91, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":92 + * cudaDevAttrMaxTexture1DMipmappedWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DMipmappedWidth + * cudaDevAttrStreamPrioritiesSupported = cudaDeviceAttr.cudaDevAttrStreamPrioritiesSupported + * cudaDevAttrGlobalL1CacheSupported = cudaDeviceAttr.cudaDevAttrGlobalL1CacheSupported # <<<<<<<<<<<<<< + * cudaDevAttrLocalL1CacheSupported = cudaDeviceAttr.cudaDevAttrLocalL1CacheSupported + * cudaDevAttrMaxSharedMemoryPerMultiprocessor = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerMultiprocessor + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrGlobalL1CacheSupported)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 92, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrGlobalL1CacheSupporte, __pyx_t_4) < 0) __PYX_ERR(0, 92, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); 
__pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":93 + * cudaDevAttrStreamPrioritiesSupported = cudaDeviceAttr.cudaDevAttrStreamPrioritiesSupported + * cudaDevAttrGlobalL1CacheSupported = cudaDeviceAttr.cudaDevAttrGlobalL1CacheSupported + * cudaDevAttrLocalL1CacheSupported = cudaDeviceAttr.cudaDevAttrLocalL1CacheSupported # <<<<<<<<<<<<<< + * cudaDevAttrMaxSharedMemoryPerMultiprocessor = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerMultiprocessor + * cudaDevAttrMaxRegistersPerMultiprocessor = cudaDeviceAttr.cudaDevAttrMaxRegistersPerMultiprocessor + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrLocalL1CacheSupported)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 93, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrLocalL1CacheSupported, __pyx_t_4) < 0) __PYX_ERR(0, 93, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":94 + * cudaDevAttrGlobalL1CacheSupported = cudaDeviceAttr.cudaDevAttrGlobalL1CacheSupported + * cudaDevAttrLocalL1CacheSupported = cudaDeviceAttr.cudaDevAttrLocalL1CacheSupported + * cudaDevAttrMaxSharedMemoryPerMultiprocessor = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerMultiprocessor # <<<<<<<<<<<<<< + * cudaDevAttrMaxRegistersPerMultiprocessor = cudaDeviceAttr.cudaDevAttrMaxRegistersPerMultiprocessor + * cudaDevAttrManagedMemory = cudaDeviceAttr.cudaDevAttrManagedMemory + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSharedMemoryPerMultiprocessor)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 94, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSharedMemoryPerMul, __pyx_t_4) < 0) __PYX_ERR(0, 94, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":95 + * cudaDevAttrLocalL1CacheSupported = cudaDeviceAttr.cudaDevAttrLocalL1CacheSupported + * 
cudaDevAttrMaxSharedMemoryPerMultiprocessor = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerMultiprocessor + * cudaDevAttrMaxRegistersPerMultiprocessor = cudaDeviceAttr.cudaDevAttrMaxRegistersPerMultiprocessor # <<<<<<<<<<<<<< + * cudaDevAttrManagedMemory = cudaDeviceAttr.cudaDevAttrManagedMemory + * cudaDevAttrIsMultiGpuBoard = cudaDeviceAttr.cudaDevAttrIsMultiGpuBoard + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxRegistersPerMultiprocessor)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 95, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxRegistersPerMultip, __pyx_t_4) < 0) __PYX_ERR(0, 95, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":96 + * cudaDevAttrMaxSharedMemoryPerMultiprocessor = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerMultiprocessor + * cudaDevAttrMaxRegistersPerMultiprocessor = cudaDeviceAttr.cudaDevAttrMaxRegistersPerMultiprocessor + * cudaDevAttrManagedMemory = cudaDeviceAttr.cudaDevAttrManagedMemory # <<<<<<<<<<<<<< + * cudaDevAttrIsMultiGpuBoard = cudaDeviceAttr.cudaDevAttrIsMultiGpuBoard + * cudaDevAttrMultiGpuBoardGroupID = cudaDeviceAttr.cudaDevAttrMultiGpuBoardGroupID + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrManagedMemory)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 96, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrManagedMemory, __pyx_t_4) < 0) __PYX_ERR(0, 96, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":97 + * cudaDevAttrMaxRegistersPerMultiprocessor = cudaDeviceAttr.cudaDevAttrMaxRegistersPerMultiprocessor + * cudaDevAttrManagedMemory = cudaDeviceAttr.cudaDevAttrManagedMemory + * cudaDevAttrIsMultiGpuBoard = cudaDeviceAttr.cudaDevAttrIsMultiGpuBoard # <<<<<<<<<<<<<< + * cudaDevAttrMultiGpuBoardGroupID = 
cudaDeviceAttr.cudaDevAttrMultiGpuBoardGroupID + * cudaDevAttrHostNativeAtomicSupported = cudaDeviceAttr.cudaDevAttrHostNativeAtomicSupported + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrIsMultiGpuBoard)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 97, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrIsMultiGpuBoard, __pyx_t_4) < 0) __PYX_ERR(0, 97, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":98 + * cudaDevAttrManagedMemory = cudaDeviceAttr.cudaDevAttrManagedMemory + * cudaDevAttrIsMultiGpuBoard = cudaDeviceAttr.cudaDevAttrIsMultiGpuBoard + * cudaDevAttrMultiGpuBoardGroupID = cudaDeviceAttr.cudaDevAttrMultiGpuBoardGroupID # <<<<<<<<<<<<<< + * cudaDevAttrHostNativeAtomicSupported = cudaDeviceAttr.cudaDevAttrHostNativeAtomicSupported + * cudaDevAttrSingleToDoublePrecisionPerfRatio = cudaDeviceAttr.cudaDevAttrSingleToDoublePrecisionPerfRatio + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMultiGpuBoardGroupID)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 98, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMultiGpuBoardGroupID, __pyx_t_4) < 0) __PYX_ERR(0, 98, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":99 + * cudaDevAttrIsMultiGpuBoard = cudaDeviceAttr.cudaDevAttrIsMultiGpuBoard + * cudaDevAttrMultiGpuBoardGroupID = cudaDeviceAttr.cudaDevAttrMultiGpuBoardGroupID + * cudaDevAttrHostNativeAtomicSupported = cudaDeviceAttr.cudaDevAttrHostNativeAtomicSupported # <<<<<<<<<<<<<< + * cudaDevAttrSingleToDoublePrecisionPerfRatio = cudaDeviceAttr.cudaDevAttrSingleToDoublePrecisionPerfRatio + * cudaDevAttrPageableMemoryAccess = cudaDeviceAttr.cudaDevAttrPageableMemoryAccess + */ + __pyx_t_4 = 
__Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrHostNativeAtomicSupported)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 99, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrHostNativeAtomicSuppo, __pyx_t_4) < 0) __PYX_ERR(0, 99, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":100 + * cudaDevAttrMultiGpuBoardGroupID = cudaDeviceAttr.cudaDevAttrMultiGpuBoardGroupID + * cudaDevAttrHostNativeAtomicSupported = cudaDeviceAttr.cudaDevAttrHostNativeAtomicSupported + * cudaDevAttrSingleToDoublePrecisionPerfRatio = cudaDeviceAttr.cudaDevAttrSingleToDoublePrecisionPerfRatio # <<<<<<<<<<<<<< + * cudaDevAttrPageableMemoryAccess = cudaDeviceAttr.cudaDevAttrPageableMemoryAccess + * cudaDevAttrConcurrentManagedAccess = cudaDeviceAttr.cudaDevAttrConcurrentManagedAccess + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrSingleToDoublePrecisionPerfRatio)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 100, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrSingleToDoublePrecisi, __pyx_t_4) < 0) __PYX_ERR(0, 100, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":101 + * cudaDevAttrHostNativeAtomicSupported = cudaDeviceAttr.cudaDevAttrHostNativeAtomicSupported + * cudaDevAttrSingleToDoublePrecisionPerfRatio = cudaDeviceAttr.cudaDevAttrSingleToDoublePrecisionPerfRatio + * cudaDevAttrPageableMemoryAccess = cudaDeviceAttr.cudaDevAttrPageableMemoryAccess # <<<<<<<<<<<<<< + * cudaDevAttrConcurrentManagedAccess = cudaDeviceAttr.cudaDevAttrConcurrentManagedAccess + * cudaDevAttrComputePreemptionSupported = cudaDeviceAttr.cudaDevAttrComputePreemptionSupported + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrPageableMemoryAccess)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 101, 
__pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrPageableMemoryAccess, __pyx_t_4) < 0) __PYX_ERR(0, 101, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":102 + * cudaDevAttrSingleToDoublePrecisionPerfRatio = cudaDeviceAttr.cudaDevAttrSingleToDoublePrecisionPerfRatio + * cudaDevAttrPageableMemoryAccess = cudaDeviceAttr.cudaDevAttrPageableMemoryAccess + * cudaDevAttrConcurrentManagedAccess = cudaDeviceAttr.cudaDevAttrConcurrentManagedAccess # <<<<<<<<<<<<<< + * cudaDevAttrComputePreemptionSupported = cudaDeviceAttr.cudaDevAttrComputePreemptionSupported + * cudaDevAttrCanUseHostPointerForRegisteredMem = cudaDeviceAttr.cudaDevAttrCanUseHostPointerForRegisteredMem + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrConcurrentManagedAccess)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 102, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrConcurrentManagedAcce, __pyx_t_4) < 0) __PYX_ERR(0, 102, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":103 + * cudaDevAttrPageableMemoryAccess = cudaDeviceAttr.cudaDevAttrPageableMemoryAccess + * cudaDevAttrConcurrentManagedAccess = cudaDeviceAttr.cudaDevAttrConcurrentManagedAccess + * cudaDevAttrComputePreemptionSupported = cudaDeviceAttr.cudaDevAttrComputePreemptionSupported # <<<<<<<<<<<<<< + * cudaDevAttrCanUseHostPointerForRegisteredMem = cudaDeviceAttr.cudaDevAttrCanUseHostPointerForRegisteredMem + * cudaDevAttrReserved92 = cudaDeviceAttr.cudaDevAttrReserved92 + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrComputePreemptionSupported)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 103, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrComputePreemptionSupp, __pyx_t_4) < 0) __PYX_ERR(0, 103, 
__pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":104 + * cudaDevAttrConcurrentManagedAccess = cudaDeviceAttr.cudaDevAttrConcurrentManagedAccess + * cudaDevAttrComputePreemptionSupported = cudaDeviceAttr.cudaDevAttrComputePreemptionSupported + * cudaDevAttrCanUseHostPointerForRegisteredMem = cudaDeviceAttr.cudaDevAttrCanUseHostPointerForRegisteredMem # <<<<<<<<<<<<<< + * cudaDevAttrReserved92 = cudaDeviceAttr.cudaDevAttrReserved92 + * cudaDevAttrReserved93 = cudaDeviceAttr.cudaDevAttrReserved93 + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrCanUseHostPointerForRegisteredMem)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 104, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrCanUseHostPointerForR, __pyx_t_4) < 0) __PYX_ERR(0, 104, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":105 + * cudaDevAttrComputePreemptionSupported = cudaDeviceAttr.cudaDevAttrComputePreemptionSupported + * cudaDevAttrCanUseHostPointerForRegisteredMem = cudaDeviceAttr.cudaDevAttrCanUseHostPointerForRegisteredMem + * cudaDevAttrReserved92 = cudaDeviceAttr.cudaDevAttrReserved92 # <<<<<<<<<<<<<< + * cudaDevAttrReserved93 = cudaDeviceAttr.cudaDevAttrReserved93 + * cudaDevAttrReserved94 = cudaDeviceAttr.cudaDevAttrReserved94 + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrReserved92)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 105, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrReserved92, __pyx_t_4) < 0) __PYX_ERR(0, 105, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":106 + * cudaDevAttrCanUseHostPointerForRegisteredMem = cudaDeviceAttr.cudaDevAttrCanUseHostPointerForRegisteredMem + * cudaDevAttrReserved92 = cudaDeviceAttr.cudaDevAttrReserved92 + * cudaDevAttrReserved93 = 
cudaDeviceAttr.cudaDevAttrReserved93 # <<<<<<<<<<<<<< + * cudaDevAttrReserved94 = cudaDeviceAttr.cudaDevAttrReserved94 + * cudaDevAttrCooperativeLaunch = cudaDeviceAttr.cudaDevAttrCooperativeLaunch + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrReserved93)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 106, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrReserved93, __pyx_t_4) < 0) __PYX_ERR(0, 106, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":107 + * cudaDevAttrReserved92 = cudaDeviceAttr.cudaDevAttrReserved92 + * cudaDevAttrReserved93 = cudaDeviceAttr.cudaDevAttrReserved93 + * cudaDevAttrReserved94 = cudaDeviceAttr.cudaDevAttrReserved94 # <<<<<<<<<<<<<< + * cudaDevAttrCooperativeLaunch = cudaDeviceAttr.cudaDevAttrCooperativeLaunch + * cudaDevAttrCooperativeMultiDeviceLaunch = cudaDeviceAttr.cudaDevAttrCooperativeMultiDeviceLaunch + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrReserved94)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 107, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrReserved94, __pyx_t_4) < 0) __PYX_ERR(0, 107, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":108 + * cudaDevAttrReserved93 = cudaDeviceAttr.cudaDevAttrReserved93 + * cudaDevAttrReserved94 = cudaDeviceAttr.cudaDevAttrReserved94 + * cudaDevAttrCooperativeLaunch = cudaDeviceAttr.cudaDevAttrCooperativeLaunch # <<<<<<<<<<<<<< + * cudaDevAttrCooperativeMultiDeviceLaunch = cudaDeviceAttr.cudaDevAttrCooperativeMultiDeviceLaunch + * cudaDevAttrMaxSharedMemoryPerBlockOptin = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlockOptin + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrCooperativeLaunch)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 108, 
__pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrCooperativeLaunch, __pyx_t_4) < 0) __PYX_ERR(0, 108, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":109 + * cudaDevAttrReserved94 = cudaDeviceAttr.cudaDevAttrReserved94 + * cudaDevAttrCooperativeLaunch = cudaDeviceAttr.cudaDevAttrCooperativeLaunch + * cudaDevAttrCooperativeMultiDeviceLaunch = cudaDeviceAttr.cudaDevAttrCooperativeMultiDeviceLaunch # <<<<<<<<<<<<<< + * cudaDevAttrMaxSharedMemoryPerBlockOptin = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlockOptin + * cudaDevAttrCanFlushRemoteWrites = cudaDeviceAttr.cudaDevAttrCanFlushRemoteWrites + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrCooperativeMultiDeviceLaunch)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 109, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrCooperativeMultiDevic, __pyx_t_4) < 0) __PYX_ERR(0, 109, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":110 + * cudaDevAttrCooperativeLaunch = cudaDeviceAttr.cudaDevAttrCooperativeLaunch + * cudaDevAttrCooperativeMultiDeviceLaunch = cudaDeviceAttr.cudaDevAttrCooperativeMultiDeviceLaunch + * cudaDevAttrMaxSharedMemoryPerBlockOptin = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlockOptin # <<<<<<<<<<<<<< + * cudaDevAttrCanFlushRemoteWrites = cudaDeviceAttr.cudaDevAttrCanFlushRemoteWrites + * cudaDevAttrHostRegisterSupported = cudaDeviceAttr.cudaDevAttrHostRegisterSupported + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSharedMemoryPerBlockOptin)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 110, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSharedMemoryPerBlo_2, __pyx_t_4) < 0) __PYX_ERR(0, 110, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* 
"cudf/_cuda/gpu.pyx":111 + * cudaDevAttrCooperativeMultiDeviceLaunch = cudaDeviceAttr.cudaDevAttrCooperativeMultiDeviceLaunch + * cudaDevAttrMaxSharedMemoryPerBlockOptin = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlockOptin + * cudaDevAttrCanFlushRemoteWrites = cudaDeviceAttr.cudaDevAttrCanFlushRemoteWrites # <<<<<<<<<<<<<< + * cudaDevAttrHostRegisterSupported = cudaDeviceAttr.cudaDevAttrHostRegisterSupported + * cudaDevAttrPageableMemoryAccessUsesHostPageTables = cudaDeviceAttr.cudaDevAttrPageableMemoryAccessUsesHostPageTables + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrCanFlushRemoteWrites)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 111, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrCanFlushRemoteWrites, __pyx_t_4) < 0) __PYX_ERR(0, 111, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":112 + * cudaDevAttrMaxSharedMemoryPerBlockOptin = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlockOptin + * cudaDevAttrCanFlushRemoteWrites = cudaDeviceAttr.cudaDevAttrCanFlushRemoteWrites + * cudaDevAttrHostRegisterSupported = cudaDeviceAttr.cudaDevAttrHostRegisterSupported # <<<<<<<<<<<<<< + * cudaDevAttrPageableMemoryAccessUsesHostPageTables = cudaDeviceAttr.cudaDevAttrPageableMemoryAccessUsesHostPageTables + * cudaDevAttrDirectManagedMemAccessFromHost = cudaDeviceAttr.cudaDevAttrDirectManagedMemAccessFromHost + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrHostRegisterSupported)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 112, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrHostRegisterSupported, __pyx_t_4) < 0) __PYX_ERR(0, 112, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":113 + * cudaDevAttrCanFlushRemoteWrites = cudaDeviceAttr.cudaDevAttrCanFlushRemoteWrites + * 
cudaDevAttrHostRegisterSupported = cudaDeviceAttr.cudaDevAttrHostRegisterSupported + * cudaDevAttrPageableMemoryAccessUsesHostPageTables = cudaDeviceAttr.cudaDevAttrPageableMemoryAccessUsesHostPageTables # <<<<<<<<<<<<<< + * cudaDevAttrDirectManagedMemAccessFromHost = cudaDeviceAttr.cudaDevAttrDirectManagedMemAccessFromHost + * + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrPageableMemoryAccessUsesHostPageTables)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 113, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrPageableMemoryAccessU, __pyx_t_4) < 0) __PYX_ERR(0, 113, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":114 + * cudaDevAttrHostRegisterSupported = cudaDeviceAttr.cudaDevAttrHostRegisterSupported + * cudaDevAttrPageableMemoryAccessUsesHostPageTables = cudaDeviceAttr.cudaDevAttrPageableMemoryAccessUsesHostPageTables + * cudaDevAttrDirectManagedMemAccessFromHost = cudaDeviceAttr.cudaDevAttrDirectManagedMemAccessFromHost # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrDirectManagedMemAccessFromHost)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 114, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrDirectManagedMemAcces, __pyx_t_4) < 0) __PYX_ERR(0, 114, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "cudf/_cuda/gpu.pyx":14 + * + * + * class CudaDeviceAttr(IntEnum): # <<<<<<<<<<<<<< + * cudaDevAttrMaxThreadsPerBlock = cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock + * cudaDevAttrMaxBlockDimX = cudaDeviceAttr.cudaDevAttrMaxBlockDimX + */ + __pyx_t_4 = __Pyx_Py3ClassCreate(__pyx_t_2, __pyx_n_s_CudaDeviceAttr, __pyx_t_1, __pyx_t_3, NULL, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (PyDict_SetItem(__pyx_d, 
__pyx_n_s_CudaDeviceAttr, __pyx_t_4) < 0) __PYX_ERR(0, 14, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "cudf/_cuda/gpu.pyx":1 + * # Copyright (c) 2020, NVIDIA CORPORATION. # <<<<<<<<<<<<<< + * + * from cudf._cuda.gpu cimport ( + */ + __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + if (__pyx_m) { + if (__pyx_d) { + __Pyx_AddTraceback("init cudf._cuda.gpu", __pyx_clineno, __pyx_lineno, __pyx_filename); + } + Py_CLEAR(__pyx_m); + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init cudf._cuda.gpu"); + } + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 
0 : -1; + #elif PY_MAJOR_VERSION >= 3 + return __pyx_m; + #else + return; + #endif +} + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule(modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, "RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* RaiseArgTupleInvalid */ +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? 
"" : "s", num_found); +} + +/* RaiseDoubleKeywords */ +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + +/* ParseKeywords */ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + while (PyDict_Next(kwds, &pos, &key, &value)) { + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; + continue; + } + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = (**name == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 
1 : + #endif + PyUnicode_Compare(**name, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : + #endif + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION < 3 + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + return -1; +} + +/* PyObjectGetAttrStr */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#endif + +/* Import */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { + PyObject *empty_list = 0; + PyObject *module = 0; + PyObject *global_dict = 0; + PyObject *empty_dict = 0; + PyObject *list; + #if PY_MAJOR_VERSION < 3 + PyObject *py_import; + py_import = 
__Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); + if (!py_import) + goto bad; + #endif + if (from_list) + list = from_list; + else { + empty_list = PyList_New(0); + if (!empty_list) + goto bad; + list = empty_list; + } + global_dict = PyModule_GetDict(__pyx_m); + if (!global_dict) + goto bad; + empty_dict = PyDict_New(); + if (!empty_dict) + goto bad; + { + #if PY_MAJOR_VERSION >= 3 + if (level == -1) { + if (strchr(__Pyx_MODULE_NAME, '.')) { + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, 1); + if (!module) { + if (!PyErr_ExceptionMatches(PyExc_ImportError)) + goto bad; + PyErr_Clear(); + } + } + level = 0; + } + #endif + if (!module) { + #if PY_MAJOR_VERSION < 3 + PyObject *py_level = PyInt_FromLong(level); + if (!py_level) + goto bad; + module = PyObject_CallFunctionObjArgs(py_import, + name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); + Py_DECREF(py_level); + #else + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, level); + #endif + } + } +bad: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_import); + #endif + Py_XDECREF(empty_list); + Py_XDECREF(empty_dict); + return module; +} + +/* ImportFrom */ +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { + PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); + if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Format(PyExc_ImportError, + #if PY_MAJOR_VERSION < 3 + "cannot import name %.230s", PyString_AS_STRING(name)); + #else + "cannot import name %S", name); + #endif + } + return value; +} + +/* GetBuiltinName */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); + if (unlikely(!result)) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/* PyDictVersioning */ 
+#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { + PyObject **dictptr = NULL; + Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; + if (offset) { +#if CYTHON_COMPILING_IN_CPYTHON + dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); +#else + dictptr = _PyObject_GetDictPtr(obj); +#endif + } + return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; +} +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) + return 0; + return obj_dict_version == __Pyx_get_object_dict_version(obj); +} +#endif + +/* GetModuleGlobalName */ +#if CYTHON_USE_DICT_VERSIONS +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) +#else +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) +#endif +{ + PyObject *result; +#if !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 + result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } else if (unlikely(PyErr_Occurred())) { + return NULL; + } +#else + result = PyDict_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } +#endif +#else + result = PyObject_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, 
*dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } + PyErr_Clear(); +#endif + return __Pyx_GetBuiltinName(name); +} + +/* CalculateMetaclass */ +static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases) { + Py_ssize_t i, nbases = PyTuple_GET_SIZE(bases); + for (i=0; i < nbases; i++) { + PyTypeObject *tmptype; + PyObject *tmp = PyTuple_GET_ITEM(bases, i); + tmptype = Py_TYPE(tmp); +#if PY_MAJOR_VERSION < 3 + if (tmptype == &PyClass_Type) + continue; +#endif + if (!metaclass) { + metaclass = tmptype; + continue; + } + if (PyType_IsSubtype(metaclass, tmptype)) + continue; + if (PyType_IsSubtype(tmptype, metaclass)) { + metaclass = tmptype; + continue; + } + PyErr_SetString(PyExc_TypeError, + "metaclass conflict: " + "the metaclass of a derived class " + "must be a (non-strict) subclass " + "of the metaclasses of all its bases"); + return NULL; + } + if (!metaclass) { +#if PY_MAJOR_VERSION < 3 + metaclass = &PyClass_Type; +#else + metaclass = &PyType_Type; +#endif + } + Py_INCREF((PyObject*) metaclass); + return (PyObject*) metaclass; +} + +/* Py3ClassCreate */ +static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, + PyObject *qualname, PyObject *mkw, PyObject *modname, PyObject *doc) { + PyObject *ns; + if (metaclass) { + PyObject *prep = __Pyx_PyObject_GetAttrStr(metaclass, __pyx_n_s_prepare); + if (prep) { + PyObject *pargs = PyTuple_Pack(2, name, bases); + if (unlikely(!pargs)) { + Py_DECREF(prep); + return NULL; + } + ns = PyObject_Call(prep, pargs, mkw); + Py_DECREF(prep); + Py_DECREF(pargs); + } else { + if (unlikely(!PyErr_ExceptionMatches(PyExc_AttributeError))) + return NULL; + PyErr_Clear(); + ns = PyDict_New(); + } + } else { + ns = PyDict_New(); + } + if (unlikely(!ns)) + return NULL; + if (unlikely(PyObject_SetItem(ns, __pyx_n_s_module, modname) < 0)) goto bad; + if (unlikely(PyObject_SetItem(ns, __pyx_n_s_qualname, qualname) < 0)) goto bad; + 
if (unlikely(doc && PyObject_SetItem(ns, __pyx_n_s_doc, doc) < 0)) goto bad; + return ns; +bad: + Py_DECREF(ns); + return NULL; +} +static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, + PyObject *dict, PyObject *mkw, + int calculate_metaclass, int allow_py2_metaclass) { + PyObject *result, *margs; + PyObject *owned_metaclass = NULL; + if (allow_py2_metaclass) { + owned_metaclass = PyObject_GetItem(dict, __pyx_n_s_metaclass); + if (owned_metaclass) { + metaclass = owned_metaclass; + } else if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) { + PyErr_Clear(); + } else { + return NULL; + } + } + if (calculate_metaclass && (!metaclass || PyType_Check(metaclass))) { + metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases); + Py_XDECREF(owned_metaclass); + if (unlikely(!metaclass)) + return NULL; + owned_metaclass = metaclass; + } + margs = PyTuple_Pack(3, name, bases, dict); + if (unlikely(!margs)) { + result = NULL; + } else { + result = PyObject_Call(metaclass, margs, mkw); + Py_DECREF(margs); + } + Py_XDECREF(owned_metaclass); + return result; +} + +/* PyErrFetchRestore */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +} +#endif + +/* CLineInTraceback */ +#ifndef CYTHON_CLINE_IN_TRACEBACK 
+static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + if (unlikely(!__pyx_cython_runtime)) { + return c_line; + } + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + if (likely(cython_runtime_dict)) { + __PYX_PY_DICT_LOOKUP_IF_MODIFIED( + use_cline, *cython_runtime_dict, + __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) + } else +#endif + { + PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); + } + else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/* CodeObjectCache */ +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { + int start = 0, mid = 0, end = count - 1; + if (end >= 0 && code_line > entries[end].code_line) { + return count; + } + while (start < end) { + mid = start + (end - start) / 2; + if (code_line < entries[mid].code_line) { + end = mid; + } else if (code_line > entries[mid].code_line) { + start = mid + 1; + } else { + return mid; + } + } + if (code_line <= entries[mid].code_line) { + return mid; + } else { + return mid + 1; + } +} +static PyCodeObject *__pyx_find_code_object(int code_line) { + PyCodeObject* code_object; + int pos; + if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { + return NULL; + } + 
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { + return NULL; + } + code_object = __pyx_code_cache.entries[pos].code_object; + Py_INCREF(code_object); + return code_object; +} +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { + int pos, i; + __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; + if (unlikely(!code_line)) { + return; + } + if (unlikely(!entries)) { + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); + if (likely(entries)) { + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = 64; + __pyx_code_cache.count = 1; + entries[0].code_line = code_line; + entries[0].code_object = code_object; + Py_INCREF(code_object); + } + return; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { + PyCodeObject* tmp = entries[pos].code_object; + entries[pos].code_object = code_object; + Py_DECREF(tmp); + return; + } + if (__pyx_code_cache.count == __pyx_code_cache.max_count) { + int new_max = __pyx_code_cache.max_count + 64; + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( + __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); + if (unlikely(!entries)) { + return; + } + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = new_max; + } + for (i=__pyx_code_cache.count; i>pos; i--) { + entries[i] = entries[i-1]; + } + entries[pos].code_line = code_line; + entries[pos].code_object = code_object; + __pyx_code_cache.count++; + Py_INCREF(code_object); +} + +/* AddTraceback */ +#include "compile.h" +#include "frameobject.h" +#include "traceback.h" +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char 
*funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyObject *py_srcfile = 0; + PyObject *py_funcname = 0; + #if PY_MAJOR_VERSION < 3 + py_srcfile = PyString_FromString(filename); + #else + py_srcfile = PyUnicode_FromString(filename); + #endif + if (!py_srcfile) goto bad; + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + #else + py_funcname = PyUnicode_FromString(funcname); + #endif + } + if (!py_funcname) goto bad; + py_code = __Pyx_PyCode_New( + 0, + 0, + 0, + 0, + 0, + __pyx_empty_bytes, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, /*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, + __pyx_empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + Py_DECREF(py_funcname); + return py_code; +bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) goto bad; + __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} + +/* CIntToPy */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { + const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(int), + little, !is_unsigned); + } +} + +/* CIntFromPyVerify */ +#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero = 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) 
-1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* CIntFromPy */ +static CYTHON_INLINE enum cudaDeviceAttr __Pyx_PyInt_As_enum__cudaDeviceAttr(PyObject *x) { + const enum cudaDeviceAttr neg_one = (enum cudaDeviceAttr) ((enum cudaDeviceAttr) 0 - (enum cudaDeviceAttr) 1), const_zero = (enum cudaDeviceAttr) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(enum cudaDeviceAttr) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (enum cudaDeviceAttr) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (enum cudaDeviceAttr) 0; + case 1: __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, digit, digits[0]) + case 2: + if (8 * sizeof(enum cudaDeviceAttr) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(enum cudaDeviceAttr) >= 2 * PyLong_SHIFT) { + return (enum cudaDeviceAttr) (((((enum cudaDeviceAttr)digits[1]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(enum cudaDeviceAttr) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(enum cudaDeviceAttr) >= 3 * PyLong_SHIFT) { + return (enum cudaDeviceAttr) (((((((enum 
cudaDeviceAttr)digits[2]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[1]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(enum cudaDeviceAttr) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(enum cudaDeviceAttr) >= 4 * PyLong_SHIFT) { + return (enum cudaDeviceAttr) (((((((((enum cudaDeviceAttr)digits[3]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[2]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[1]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (enum cudaDeviceAttr) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(enum cudaDeviceAttr) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(enum cudaDeviceAttr, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(enum cudaDeviceAttr) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(enum cudaDeviceAttr, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (enum cudaDeviceAttr) 0; + case -1: __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, digit, +digits[0]) + case -2: + if (8 * sizeof(enum cudaDeviceAttr) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(enum 
cudaDeviceAttr, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(enum cudaDeviceAttr) - 1 > 2 * PyLong_SHIFT) { + return (enum cudaDeviceAttr) (((enum cudaDeviceAttr)-1)*(((((enum cudaDeviceAttr)digits[1]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(enum cudaDeviceAttr) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(enum cudaDeviceAttr) - 1 > 2 * PyLong_SHIFT) { + return (enum cudaDeviceAttr) ((((((enum cudaDeviceAttr)digits[1]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(enum cudaDeviceAttr) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(enum cudaDeviceAttr) - 1 > 3 * PyLong_SHIFT) { + return (enum cudaDeviceAttr) (((enum cudaDeviceAttr)-1)*(((((((enum cudaDeviceAttr)digits[2]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[1]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(enum cudaDeviceAttr) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(enum cudaDeviceAttr) - 1 > 3 * PyLong_SHIFT) { + return (enum cudaDeviceAttr) ((((((((enum cudaDeviceAttr)digits[2]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[1]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[0]))); + } + } + break; + case 
-4: + if (8 * sizeof(enum cudaDeviceAttr) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(enum cudaDeviceAttr) - 1 > 4 * PyLong_SHIFT) { + return (enum cudaDeviceAttr) (((enum cudaDeviceAttr)-1)*(((((((((enum cudaDeviceAttr)digits[3]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[2]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[1]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(enum cudaDeviceAttr) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(enum cudaDeviceAttr) - 1 > 4 * PyLong_SHIFT) { + return (enum cudaDeviceAttr) ((((((((((enum cudaDeviceAttr)digits[3]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[2]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[1]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[0]))); + } + } + break; + } +#endif + if (sizeof(enum cudaDeviceAttr) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(enum cudaDeviceAttr, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(enum cudaDeviceAttr) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(enum cudaDeviceAttr, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + enum cudaDeviceAttr val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if 
(likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (enum cudaDeviceAttr) -1; + } + } else { + enum cudaDeviceAttr val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (enum cudaDeviceAttr) -1; + val = __Pyx_PyInt_As_enum__cudaDeviceAttr(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to enum cudaDeviceAttr"); + return (enum cudaDeviceAttr) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to enum cudaDeviceAttr"); + return (enum cudaDeviceAttr) -1; +} + +/* CIntFromPy */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { + const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { + 
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { + return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) + case -2: + if (8 * 
sizeof(int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned 
long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (int) -1; + } + } else { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value 
too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* CIntToPy */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { + const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(long) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); + } +} + +/* CIntFromPy */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { + const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(long) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (long) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) + case 2: + if (8 * sizeof(long) > 1 * 
PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { + return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(long) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if 
CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) + case -2: + if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + 
break; + case -4: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } +#endif + if (sizeof(long) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + long val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + 
is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (long) -1; + } + } else { + long val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (long) -1; + val = __Pyx_PyInt_As_long(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to long"); + return (long) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to long"); + return (long) -1; +} + +/* FastTypeChecks */ +#if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = a->tp_base; + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = a->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(a, b); +} +#if PY_MAJOR_VERSION == 2 +static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { + PyObject *exception, *value, *tb; + int res; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&exception, &value, &tb); + res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + if (!res) { + res = PyObject_IsSubclass(err, exc_type2); + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + } + __Pyx_ErrRestore(exception, value, tb); + return res; +} +#else +static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { + int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; + if (!res) { + res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); + } + return res; +} +#endif +static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + assert(PyExceptionClass_Check(exc_type)); + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + for (i=0; ip) { + #if PY_MAJOR_VERSION < 3 + if (t->is_unicode) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + #else + if (t->is_unicode | t->is_str) { + if (t->intern) { + *t->p = PyUnicode_InternFromString(t->s); + } else if (t->encoding) { + *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); + } else { + *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); + } + } else { + *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); + } + #endif + if (!*t->p) + return -1; + if (PyObject_Hash(*t->p) == -1) + return -1; + ++t; + } + return 0; +} + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { + return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); +} +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { + Py_ssize_t ignore; + return __Pyx_PyObject_AsStringAndSize(o, &ignore); +} +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +#if !CYTHON_PEP393_ENABLED +static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + char* defenc_c; + PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); + if (!defenc) return NULL; + defenc_c = PyBytes_AS_STRING(defenc); +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + { + char* end = defenc_c + PyBytes_GET_SIZE(defenc); + char* c; + for (c = defenc_c; c < end; c++) { + if ((unsigned char) (*c) >= 128) { + PyUnicode_AsASCIIString(o); + return NULL; + } + } 
+ } +#endif + *length = PyBytes_GET_SIZE(defenc); + return defenc_c; +} +#else +static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + if (likely(PyUnicode_IS_ASCII(o))) { + *length = PyUnicode_GET_LENGTH(o); + return PyUnicode_AsUTF8(o); + } else { + PyUnicode_AsASCIIString(o); + return NULL; + } +#else + return PyUnicode_AsUTF8AndSize(o, length); +#endif +} +#endif +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT + if ( +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + __Pyx_sys_getdefaultencoding_not_ascii && +#endif + PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); + } else +#endif +#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + if (PyByteArray_Check(o)) { + *length = PyByteArray_GET_SIZE(o); + return PyByteArray_AS_STRING(o); + } else +#endif + { + char* result; + int r = PyBytes_AsStringAndSize(o, &result, length); + if (unlikely(r < 0)) { + return NULL; + } else { + return result; + } + } +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + int is_true = x == Py_True; + if (is_true | (x == Py_False) | (x == Py_None)) return is_true; + else return PyObject_IsTrue(x); +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { + int retval; + if (unlikely(!x)) return -1; + retval = __Pyx_PyObject_IsTrue(x); + Py_DECREF(x); + return retval; +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type %.200s). 
" + "The ability to return an instance of a strict subclass of int " + "is deprecated, and may be removed in a future version of Python.", + Py_TYPE(result)->tp_name)) { + Py_DECREF(result); + return NULL; + } + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type %.200s)", + type_name, type_name, Py_TYPE(result)->tp_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(b); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)b)->ob_digit; + const Py_ssize_t size = Py_SIZE(b); + if (likely(__Pyx_sst_abs(size) <= 1)) { + 
ival = likely(size) ? digits[0] : 0; + if (size == -1) ival = -ival; + return ival; + } else { + switch (size) { + case 2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + } + } + #endif + return PyLong_AsSsize_t(b); + } + x = PyNumber_Index(b); + if (!x) return -1; + ival = PyInt_AsSsize_t(x); + Py_DECREF(x); + return ival; +} +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { + return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); +} +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { + return PyInt_FromSize_t(ival); +} + + +#endif /* Py_PYTHON_H */ diff --git a/python/cudf/cudf/_cuda/gpu.pxd b/python/cudf/cudf/_cuda/gpu.pxd new file mode 100644 index 00000000000..f2428eade81 --- /dev/null +++ b/python/cudf/cudf/_cuda/gpu.pxd @@ -0,0 +1,114 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. 
+ +cdef extern from "cuda.h" nogil: + cdef enum cudaDeviceAttr: + cudaDevAttrMaxThreadsPerBlock = 1 + cudaDevAttrMaxBlockDimX = 2 + cudaDevAttrMaxBlockDimY = 3 + cudaDevAttrMaxBlockDimZ = 4 + cudaDevAttrMaxGridDimX = 5 + cudaDevAttrMaxGridDimY = 6 + cudaDevAttrMaxGridDimZ = 7 + cudaDevAttrMaxSharedMemoryPerBlock = 8 + cudaDevAttrTotalConstantMemory = 9 + cudaDevAttrWarpSize = 10 + cudaDevAttrMaxPitch = 11 + cudaDevAttrMaxRegistersPerBlock = 12 + cudaDevAttrClockRate = 13 + cudaDevAttrTextureAlignment = 14 + cudaDevAttrGpuOverlap = 15 + cudaDevAttrMultiProcessorCount = 16 + cudaDevAttrKernelExecTimeout = 17 + cudaDevAttrIntegrated = 18 + cudaDevAttrCanMapHostMemory = 19 + cudaDevAttrComputeMode = 20 + cudaDevAttrMaxTexture1DWidth = 21 + cudaDevAttrMaxTexture2DWidth = 22 + cudaDevAttrMaxTexture2DHeight = 23 + cudaDevAttrMaxTexture3DWidth = 24 + cudaDevAttrMaxTexture3DHeight = 25 + cudaDevAttrMaxTexture3DDepth = 26 + cudaDevAttrMaxTexture2DLayeredWidth = 27 + cudaDevAttrMaxTexture2DLayeredHeight = 28 + cudaDevAttrMaxTexture2DLayeredLayers = 29 + cudaDevAttrSurfaceAlignment = 30 + cudaDevAttrConcurrentKernels = 31 + cudaDevAttrEccEnabled = 32 + cudaDevAttrPciBusId = 33 + cudaDevAttrPciDeviceId = 34 + cudaDevAttrTccDriver = 35 + cudaDevAttrMemoryClockRate = 36 + cudaDevAttrGlobalMemoryBusWidth = 37 + cudaDevAttrL2CacheSize = 38 + cudaDevAttrMaxThreadsPerMultiProcessor = 39 + cudaDevAttrAsyncEngineCount = 40 + cudaDevAttrUnifiedAddressing = 41 + cudaDevAttrMaxTexture1DLayeredWidth = 42 + cudaDevAttrMaxTexture1DLayeredLayers = 43 + cudaDevAttrMaxTexture2DGatherWidth = 45 + cudaDevAttrMaxTexture2DGatherHeight = 46 + cudaDevAttrMaxTexture3DWidthAlt = 47 + cudaDevAttrMaxTexture3DHeightAlt = 48 + cudaDevAttrMaxTexture3DDepthAlt = 49 + cudaDevAttrPciDomainId = 50 + cudaDevAttrTexturePitchAlignment = 51 + cudaDevAttrMaxTextureCubemapWidth = 52 + cudaDevAttrMaxTextureCubemapLayeredWidth = 53 + cudaDevAttrMaxTextureCubemapLayeredLayers = 54 + cudaDevAttrMaxSurface1DWidth = 55 + 
cudaDevAttrMaxSurface2DWidth = 56 + cudaDevAttrMaxSurface2DHeight = 57 + cudaDevAttrMaxSurface3DWidth = 58 + cudaDevAttrMaxSurface3DHeight = 59 + cudaDevAttrMaxSurface3DDepth = 60 + cudaDevAttrMaxSurface1DLayeredWidth = 61 + cudaDevAttrMaxSurface1DLayeredLayers = 62 + cudaDevAttrMaxSurface2DLayeredWidth = 63 + cudaDevAttrMaxSurface2DLayeredHeight = 64 + cudaDevAttrMaxSurface2DLayeredLayers = 65 + cudaDevAttrMaxSurfaceCubemapWidth = 66 + cudaDevAttrMaxSurfaceCubemapLayeredWidth = 67 + cudaDevAttrMaxSurfaceCubemapLayeredLayers = 68 + cudaDevAttrMaxTexture1DLinearWidth = 69 + cudaDevAttrMaxTexture2DLinearWidth = 70 + cudaDevAttrMaxTexture2DLinearHeight = 71 + cudaDevAttrMaxTexture2DLinearPitch = 72 + cudaDevAttrMaxTexture2DMipmappedWidth = 73 + cudaDevAttrMaxTexture2DMipmappedHeight = 74 + cudaDevAttrComputeCapabilityMajor = 75 + cudaDevAttrComputeCapabilityMinor = 76 + cudaDevAttrMaxTexture1DMipmappedWidth = 77 + cudaDevAttrStreamPrioritiesSupported = 78 + cudaDevAttrGlobalL1CacheSupported = 79 + cudaDevAttrLocalL1CacheSupported = 80 + cudaDevAttrMaxSharedMemoryPerMultiprocessor = 81 + cudaDevAttrMaxRegistersPerMultiprocessor = 82 + cudaDevAttrManagedMemory = 83 + cudaDevAttrIsMultiGpuBoard = 84 + cudaDevAttrMultiGpuBoardGroupID = 85 + cudaDevAttrHostNativeAtomicSupported = 86 + cudaDevAttrSingleToDoublePrecisionPerfRatio = 87 + cudaDevAttrPageableMemoryAccess = 88 + cudaDevAttrConcurrentManagedAccess = 89 + cudaDevAttrComputePreemptionSupported = 90 + cudaDevAttrCanUseHostPointerForRegisteredMem = 91 + cudaDevAttrReserved92 = 92 + cudaDevAttrReserved93 = 93 + cudaDevAttrReserved94 = 94 + cudaDevAttrCooperativeLaunch = 95 + cudaDevAttrCooperativeMultiDeviceLaunch = 96 + cudaDevAttrMaxSharedMemoryPerBlockOptin = 97 + cudaDevAttrCanFlushRemoteWrites = 98 + cudaDevAttrHostRegisterSupported = 99 + cudaDevAttrPageableMemoryAccessUsesHostPageTables = 100 + cudaDevAttrDirectManagedMemAccessFromHost = 101 + +cdef extern from "cuda_runtime_api.h" nogil: + + int 
cudaDriverGetVersion(int* driverVersion) except + + int cudaRuntimeGetVersion(int* runtimeVersion) except + + int cudaGetDeviceCount(int* count) except + + int cudaDeviceGetAttribute(int* value, cudaDeviceAttr attr, int device) \ + except + + +ctypedef int underlying_type_attribute diff --git a/python/cudf/cudf/_cuda/gpu.pyx b/python/cudf/cudf/_cuda/gpu.pyx new file mode 100644 index 00000000000..09211b58d9b --- /dev/null +++ b/python/cudf/cudf/_cuda/gpu.pyx @@ -0,0 +1,272 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. + +from cudf._cuda.gpu cimport ( + cudaDriverGetVersion, + cudaRuntimeGetVersion, + cudaGetDeviceCount, + cudaDeviceGetAttribute, + cudaDeviceAttr +) +from enum import IntEnum +from cudf._cuda.gpu cimport underlying_type_attribute as c_attr + + +class CudaDeviceAttr(IntEnum): + cudaDevAttrMaxThreadsPerBlock = \ + cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock + cudaDevAttrMaxBlockDimX = \ + cudaDeviceAttr.cudaDevAttrMaxBlockDimX + cudaDevAttrMaxBlockDimY = \ + cudaDeviceAttr.cudaDevAttrMaxBlockDimY + cudaDevAttrMaxBlockDimZ = \ + cudaDeviceAttr.cudaDevAttrMaxBlockDimZ + cudaDevAttrMaxGridDimX = \ + cudaDeviceAttr.cudaDevAttrMaxGridDimX + cudaDevAttrMaxGridDimY = \ + cudaDeviceAttr.cudaDevAttrMaxGridDimY + cudaDevAttrMaxGridDimZ = \ + cudaDeviceAttr.cudaDevAttrMaxGridDimZ + cudaDevAttrMaxSharedMemoryPerBlock = \ + cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlock + cudaDevAttrTotalConstantMemory = \ + cudaDeviceAttr.cudaDevAttrTotalConstantMemory + cudaDevAttrWarpSize = \ + cudaDeviceAttr.cudaDevAttrWarpSize + cudaDevAttrMaxPitch = \ + cudaDeviceAttr.cudaDevAttrMaxPitch + cudaDevAttrMaxRegistersPerBlock = \ + cudaDeviceAttr.cudaDevAttrMaxRegistersPerBlock + cudaDevAttrClockRate = \ + cudaDeviceAttr.cudaDevAttrClockRate + cudaDevAttrTextureAlignment = \ + cudaDeviceAttr.cudaDevAttrTextureAlignment + cudaDevAttrGpuOverlap = \ + cudaDeviceAttr.cudaDevAttrGpuOverlap + cudaDevAttrMultiProcessorCount = \ + cudaDeviceAttr.cudaDevAttrMultiProcessorCount + 
cudaDevAttrKernelExecTimeout = \ + cudaDeviceAttr.cudaDevAttrKernelExecTimeout + cudaDevAttrIntegrated = \ + cudaDeviceAttr.cudaDevAttrIntegrated + cudaDevAttrCanMapHostMemory = \ + cudaDeviceAttr.cudaDevAttrCanMapHostMemory + cudaDevAttrComputeMode = \ + cudaDeviceAttr.cudaDevAttrComputeMode + cudaDevAttrMaxTexture1DWidth = \ + cudaDeviceAttr.cudaDevAttrMaxTexture1DWidth + cudaDevAttrMaxTexture2DWidth = \ + cudaDeviceAttr.cudaDevAttrMaxTexture2DWidth + cudaDevAttrMaxTexture2DHeight = \ + cudaDeviceAttr.cudaDevAttrMaxTexture2DHeight + cudaDevAttrMaxTexture3DWidth = \ + cudaDeviceAttr.cudaDevAttrMaxTexture3DWidth + cudaDevAttrMaxTexture3DHeight = \ + cudaDeviceAttr.cudaDevAttrMaxTexture3DHeight + cudaDevAttrMaxTexture3DDepth = \ + cudaDeviceAttr.cudaDevAttrMaxTexture3DDepth + cudaDevAttrMaxTexture2DLayeredWidth = \ + cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredWidth + cudaDevAttrMaxTexture2DLayeredHeight = \ + cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredHeight + cudaDevAttrMaxTexture2DLayeredLayers = \ + cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredLayers + cudaDevAttrSurfaceAlignment = \ + cudaDeviceAttr.cudaDevAttrSurfaceAlignment + cudaDevAttrConcurrentKernels = \ + cudaDeviceAttr.cudaDevAttrConcurrentKernels + cudaDevAttrEccEnabled = \ + cudaDeviceAttr.cudaDevAttrEccEnabled + cudaDevAttrPciBusId = \ + cudaDeviceAttr.cudaDevAttrPciBusId + cudaDevAttrPciDeviceId = \ + cudaDeviceAttr.cudaDevAttrPciDeviceId + cudaDevAttrTccDriver = \ + cudaDeviceAttr.cudaDevAttrTccDriver + cudaDevAttrMemoryClockRate = \ + cudaDeviceAttr.cudaDevAttrMemoryClockRate + cudaDevAttrGlobalMemoryBusWidth = \ + cudaDeviceAttr.cudaDevAttrGlobalMemoryBusWidth + cudaDevAttrL2CacheSize = \ + cudaDeviceAttr.cudaDevAttrL2CacheSize + cudaDevAttrMaxThreadsPerMultiProcessor = \ + cudaDeviceAttr.cudaDevAttrMaxThreadsPerMultiProcessor + cudaDevAttrAsyncEngineCount = \ + cudaDeviceAttr.cudaDevAttrAsyncEngineCount + cudaDevAttrUnifiedAddressing = \ + cudaDeviceAttr.cudaDevAttrUnifiedAddressing + 
cudaDevAttrMaxTexture1DLayeredWidth = \ + cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredWidth + cudaDevAttrMaxTexture1DLayeredLayers = \ + cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredLayers + cudaDevAttrMaxTexture2DGatherWidth = \ + cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherWidth + cudaDevAttrMaxTexture2DGatherHeight = \ + cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherHeight + cudaDevAttrMaxTexture3DWidthAlt = \ + cudaDeviceAttr.cudaDevAttrMaxTexture3DWidthAlt + cudaDevAttrMaxTexture3DHeightAlt = \ + cudaDeviceAttr.cudaDevAttrMaxTexture3DHeightAlt + cudaDevAttrMaxTexture3DDepthAlt = \ + cudaDeviceAttr.cudaDevAttrMaxTexture3DDepthAlt + cudaDevAttrPciDomainId = \ + cudaDeviceAttr.cudaDevAttrPciDomainId + cudaDevAttrTexturePitchAlignment = \ + cudaDeviceAttr.cudaDevAttrTexturePitchAlignment + cudaDevAttrMaxTextureCubemapWidth = \ + cudaDeviceAttr.cudaDevAttrMaxTextureCubemapWidth + cudaDevAttrMaxTextureCubemapLayeredWidth = \ + cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredWidth + cudaDevAttrMaxTextureCubemapLayeredLayers = \ + cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredLayers + cudaDevAttrMaxSurface1DWidth = \ + cudaDeviceAttr.cudaDevAttrMaxSurface1DWidth + cudaDevAttrMaxSurface2DWidth = \ + cudaDeviceAttr.cudaDevAttrMaxSurface2DWidth + cudaDevAttrMaxSurface2DHeight = \ + cudaDeviceAttr.cudaDevAttrMaxSurface2DHeight + cudaDevAttrMaxSurface3DWidth = \ + cudaDeviceAttr.cudaDevAttrMaxSurface3DWidth + cudaDevAttrMaxSurface3DHeight = \ + cudaDeviceAttr.cudaDevAttrMaxSurface3DHeight + cudaDevAttrMaxSurface3DDepth = \ + cudaDeviceAttr.cudaDevAttrMaxSurface3DDepth + cudaDevAttrMaxSurface1DLayeredWidth = \ + cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredWidth + cudaDevAttrMaxSurface1DLayeredLayers = \ + cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredLayers + cudaDevAttrMaxSurface2DLayeredWidth = \ + cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredWidth + cudaDevAttrMaxSurface2DLayeredHeight = \ + cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredHeight + 
cudaDevAttrMaxSurface2DLayeredLayers = \ + cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredLayers + cudaDevAttrMaxSurfaceCubemapWidth = \ + cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapWidth + cudaDevAttrMaxSurfaceCubemapLayeredWidth = \ + cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredWidth + cudaDevAttrMaxSurfaceCubemapLayeredLayers = \ + cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredLayers + cudaDevAttrMaxTexture1DLinearWidth = \ + cudaDeviceAttr.cudaDevAttrMaxTexture1DLinearWidth + cudaDevAttrMaxTexture2DLinearWidth = \ + cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearWidth + cudaDevAttrMaxTexture2DLinearHeight = \ + cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearHeight + cudaDevAttrMaxTexture2DLinearPitch = \ + cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearPitch + cudaDevAttrMaxTexture2DMipmappedWidth = \ + cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedWidth + cudaDevAttrMaxTexture2DMipmappedHeight = \ + cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedHeight + cudaDevAttrComputeCapabilityMajor = \ + cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor + cudaDevAttrComputeCapabilityMinor = \ + cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor + cudaDevAttrMaxTexture1DMipmappedWidth = \ + cudaDeviceAttr.cudaDevAttrMaxTexture1DMipmappedWidth + cudaDevAttrStreamPrioritiesSupported = \ + cudaDeviceAttr.cudaDevAttrStreamPrioritiesSupported + cudaDevAttrGlobalL1CacheSupported = \ + cudaDeviceAttr.cudaDevAttrGlobalL1CacheSupported + cudaDevAttrLocalL1CacheSupported = \ + cudaDeviceAttr.cudaDevAttrLocalL1CacheSupported + cudaDevAttrMaxSharedMemoryPerMultiprocessor = \ + cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerMultiprocessor + cudaDevAttrMaxRegistersPerMultiprocessor = \ + cudaDeviceAttr.cudaDevAttrMaxRegistersPerMultiprocessor + cudaDevAttrManagedMemory = \ + cudaDeviceAttr.cudaDevAttrManagedMemory + cudaDevAttrIsMultiGpuBoard = \ + cudaDeviceAttr.cudaDevAttrIsMultiGpuBoard + cudaDevAttrMultiGpuBoardGroupID = \ + cudaDeviceAttr.cudaDevAttrMultiGpuBoardGroupID + 
cudaDevAttrHostNativeAtomicSupported = \ + cudaDeviceAttr.cudaDevAttrHostNativeAtomicSupported + cudaDevAttrSingleToDoublePrecisionPerfRatio = \ + cudaDeviceAttr.cudaDevAttrSingleToDoublePrecisionPerfRatio + cudaDevAttrPageableMemoryAccess = \ + cudaDeviceAttr.cudaDevAttrPageableMemoryAccess + cudaDevAttrConcurrentManagedAccess = \ + cudaDeviceAttr.cudaDevAttrConcurrentManagedAccess + cudaDevAttrComputePreemptionSupported = \ + cudaDeviceAttr.cudaDevAttrComputePreemptionSupported + cudaDevAttrCanUseHostPointerForRegisteredMem = \ + cudaDeviceAttr.cudaDevAttrCanUseHostPointerForRegisteredMem + cudaDevAttrReserved92 = \ + cudaDeviceAttr.cudaDevAttrReserved92 + cudaDevAttrReserved93 = \ + cudaDeviceAttr.cudaDevAttrReserved93 + cudaDevAttrReserved94 = \ + cudaDeviceAttr.cudaDevAttrReserved94 + cudaDevAttrCooperativeLaunch = \ + cudaDeviceAttr.cudaDevAttrCooperativeLaunch + cudaDevAttrCooperativeMultiDeviceLaunch = \ + cudaDeviceAttr.cudaDevAttrCooperativeMultiDeviceLaunch + cudaDevAttrMaxSharedMemoryPerBlockOptin = \ + cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlockOptin + cudaDevAttrCanFlushRemoteWrites = \ + cudaDeviceAttr.cudaDevAttrCanFlushRemoteWrites + cudaDevAttrHostRegisterSupported = \ + cudaDeviceAttr.cudaDevAttrHostRegisterSupported + cudaDevAttrPageableMemoryAccessUsesHostPageTables = \ + \ + cudaDeviceAttr.cudaDevAttrPageableMemoryAccessUsesHostPageTables + cudaDevAttrDirectManagedMemAccessFromHost = \ + cudaDeviceAttr.cudaDevAttrDirectManagedMemAccessFromHost + + +def driverGetVersion(): + """ + Returns in the latest version of CUDA supported by the driver. + The version is returned as (1000 major + 10 minor). For example, + CUDA 9.2 would be represented by 9020. If no driver is installed, + then 0 is returned as the driver version. + + This function returns -1 if driver version is NULL. 
+ """ + cdef int version + status = cudaDriverGetVersion(&version) + return -1 if status != 0 else version + + +def runtimeGetVersion(): + """ + Returns the version number of the current CUDA Runtime instance. + The version is returned as (1000 major + 10 minor). For example, + CUDA 9.2 would be represented by 9020. + + This function returns -1 if runtime version is NULL. + """ + + cdef int version + status = cudaRuntimeGetVersion(&version) + return -1 if status != 0 else version + + +def getDeviceCount(): + """ + Returns the number of devices with compute capability greater or + equal to 2.0 that are available for execution. + + This function returns -1 if NULL device pointer is assigned. + """ + + cdef int count + status = cudaGetDeviceCount(&count) + return -1 if status != 0 else count + + +def getDeviceAttribute(attr, device): + """ + Returns information about the device. + + Parameters + attr + Device attribute to query + device + Device number to query + """ + + cdef int value + status = cudaDeviceGetAttribute(&value, attr, device) + return -1 if status != 0 else value diff --git a/python/cudf/cudf/utils/gpu.pxd b/python/cudf/cudf/utils/gpu.pxd deleted file mode 100644 index 978b8efa182..00000000000 --- a/python/cudf/cudf/utils/gpu.pxd +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) 2020, NVIDIA CORPORATION. - - -cdef extern from "cudf/utilities/device.hpp" namespace \ - "cudf::experimental" nogil: - - cdef int get_cuda_runtime_version() except + - cdef int get_gpu_device_count() except + - cdef int get_cuda_latest_supported_driver_version() except + diff --git a/python/cudf/cudf/utils/gpu.pyx b/python/cudf/cudf/utils/gpu.pyx deleted file mode 100644 index e991cca0750..00000000000 --- a/python/cudf/cudf/utils/gpu.pyx +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2020, NVIDIA CORPORATION. 
- -from cudf.utils.gpu cimport ( - get_cuda_runtime_version as cpp_get_cuda_runtime_version, - get_gpu_device_count as cpp_get_gpu_device_count, - get_cuda_latest_supported_driver_version as - cpp_get_cuda_latest_supported_driver_version -) - - -def get_cuda_runtime_version(): - cdef int c_result - with nogil: - c_result = cpp_get_cuda_runtime_version() - return c_result - - -def get_gpu_device_count(): - cdef int c_result - with nogil: - c_result = cpp_get_gpu_device_count() - return c_result - - -def get_cuda_latest_supported_driver_version(): - cdef int c_result - with nogil: - c_result = cpp_get_cuda_latest_supported_driver_version() - return c_result diff --git a/python/cudf/cudf/utils/gpu_utils.py b/python/cudf/cudf/utils/gpu_utils.py index 4f1f565ed04..d7712e967d0 100644 --- a/python/cudf/cudf/utils/gpu_utils.py +++ b/python/cudf/cudf/utils/gpu_utils.py @@ -1,16 +1,24 @@ def validate_setup(): - from .gpu import get_gpu_device_count + from cudf._cuda.gpu import ( + getDeviceCount, + driverGetVersion, + runtimeGetVersion, + getDeviceAttribute, + CudaDeviceAttr, + ) + import warnings - gpus_count = get_gpu_device_count() + gpus_count = getDeviceCount() if gpus_count > 0: # Cupy throws RunTimeException to get GPU count, # hence obtaining GPU count by in-house cpp api above - import cupy # 75 - Indicates to get "cudaDevAttrComputeCapabilityMajor" attribute # 0 - Get GPU 0 - major_version = cupy.cuda.runtime.deviceGetAttribute(75, 0) + major_version = getDeviceAttribute( + CudaDeviceAttr.cudaDevAttrComputeCapabilityMajor, 0 + ) if major_version >= 6: # You have a GPU with NVIDIA Pascal™ architecture or better @@ -18,19 +26,18 @@ def validate_setup(): # Turing 7.5 # Volta 7.x # Pascal 6.x - # Maxwell 5.x + # Maxwell 5.x # Kepler 3.x # Fermi 2.x pass else: - from cudf.errors import UnSupportedGPUError - raise UnSupportedGPUError( + warnings.warn( "You will need a GPU with NVIDIA Pascal™ architecture or \ better" ) - cuda_runtime_version = 
cupy.cuda.runtime.runtimeGetVersion() + cuda_runtime_version = runtimeGetVersion() if cuda_runtime_version > 10000: # CUDA Runtime Version Check: Runtime version is greater than 10000 @@ -42,13 +49,29 @@ def validate_setup(): "Please update your CUDA Runtime to 10.0 or above" ) - cuda_driver_version = cupy.cuda.runtime.driverGetVersion() + cuda_driver_supported_rt_version = driverGetVersion() + + # Though Yes, Externally driver version is represented like `418.39` + # and cuda runtime version like `10.1`. It is not the similar case + # at cuda api's level. Coming down to APIs they follow a uniform + # convention of an integer which corresponds to the versioning + # like (1000 major + 10 minor) for 10.1 Driver version API doesn't + # actually indicate driver version, it indicates only the latest + # CUDA version supported by the driver. + # For reference : + # https://docs.nvidia.com/deploy/cuda-compatibility/index.html - if cuda_driver_version == 0: + if cuda_driver_supported_rt_version == 0: from cudf.errors import UnSupportedCUDAError - raise UnSupportedCUDAError("Please install CUDA Driver") - elif cuda_driver_version >= cuda_runtime_version: + raise UnSupportedCUDAError( + "We couldn't detect the GPU driver\ + properly. 
Please follow the linux installation guide to\ + ensure your driver is properly installed.\ + : https://docs.nvidia.com/cuda/cuda-installation-guide-linux/" + ) + + elif cuda_driver_supported_rt_version >= cuda_runtime_version: # CUDA Driver Version Check: # Driver Runtime version is >= Runtime version pass @@ -63,11 +86,10 @@ def validate_setup(): + "\n" "Latest version of CUDA \ supported by current NVIDIA GPU Driver : " - + str(cuda_driver_version) + + str(cuda_driver_supported_rt_version) ) else: - import warnings warnings.warn( "You donot have an NVIDIA GPU, please install one and try again" From e4c3288be40164385f3b2bac1991c304ba5bb906 Mon Sep 17 00:00:00 2001 From: galipremsagar Date: Thu, 26 Mar 2020 18:39:22 -0700 Subject: [PATCH 10/21] remove cpp file --- python/cudf/cudf/_cuda/gpu.cpp | 5207 -------------------------------- 1 file changed, 5207 deletions(-) delete mode 100644 python/cudf/cudf/_cuda/gpu.cpp diff --git a/python/cudf/cudf/_cuda/gpu.cpp b/python/cudf/cudf/_cuda/gpu.cpp deleted file mode 100644 index cca7eaedef8..00000000000 --- a/python/cudf/cudf/_cuda/gpu.cpp +++ /dev/null @@ -1,5207 +0,0 @@ -/* Generated by Cython 0.29.15 */ - -/* BEGIN: Cython Metadata -{ - "distutils": { - "depends": [ - "/usr/local/cuda/include/cuda.h", - "/usr/local/cuda/include/cuda_runtime_api.h" - ], - "extra_compile_args": [ - "-std=c++14" - ], - "include_dirs": [ - "../../cpp/include/cudf", - "../../cpp/include", - "../../cpp/build/include", - "../../thirdparty/cub", - "../../thirdparty/libcudacxx/include", - "/conda/envs/cudf/include", - "/conda/envs/cudf/lib/python3.7/site-packages/numpy/core/include", - "/usr/local/cuda/include" - ], - "language": "c++", - "libraries": [ - "cudf" - ], - "library_dirs": [ - "/conda/envs/cudf/lib/python3.7/site-packages", - "/conda/envs/cudf/lib" - ], - "name": "cudf._cuda.gpu", - "sources": [ - "cudf/_cuda/gpu.pyx" - ] - }, - "module_name": "cudf._cuda.gpu" -} -END: Cython Metadata */ - -#define PY_SSIZE_T_CLEAN -#include 
"Python.h" -#ifndef Py_PYTHON_H - #error Python headers needed to compile C extensions, please install development version of Python. -#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) - #error Cython requires Python 2.6+ or Python 3.3+. -#else -#define CYTHON_ABI "0_29_15" -#define CYTHON_HEX_VERSION 0x001D0FF0 -#define CYTHON_FUTURE_DIVISION 1 -#include -#ifndef offsetof - #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) -#endif -#if !defined(WIN32) && !defined(MS_WINDOWS) - #ifndef __stdcall - #define __stdcall - #endif - #ifndef __cdecl - #define __cdecl - #endif - #ifndef __fastcall - #define __fastcall - #endif -#endif -#ifndef DL_IMPORT - #define DL_IMPORT(t) t -#endif -#ifndef DL_EXPORT - #define DL_EXPORT(t) t -#endif -#define __PYX_COMMA , -#ifndef HAVE_LONG_LONG - #if PY_VERSION_HEX >= 0x02070000 - #define HAVE_LONG_LONG - #endif -#endif -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif -#ifndef Py_HUGE_VAL - #define Py_HUGE_VAL HUGE_VAL -#endif -#ifdef PYPY_VERSION - #define CYTHON_COMPILING_IN_PYPY 1 - #define CYTHON_COMPILING_IN_PYSTON 0 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #undef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 0 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #if PY_VERSION_HEX < 0x03050000 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #undef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #undef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 1 - #undef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 0 - #undef CYTHON_UNPACK_METHODS - 
#define CYTHON_UNPACK_METHODS 0 - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 0 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 -#elif defined(PYSTON_VERSION) - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_PYSTON 1 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #ifndef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 1 - #endif - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #ifndef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 1 - #endif - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #ifndef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 1 - #endif - #ifndef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 1 - #endif - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 0 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 -#else - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_PYSTON 0 - #define CYTHON_COMPILING_IN_CPYTHON 1 - #ifndef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 1 - #endif - 
#if PY_VERSION_HEX < 0x02070000 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) - #define CYTHON_USE_PYTYPE_LOOKUP 1 - #endif - #if PY_MAJOR_VERSION < 3 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #if PY_VERSION_HEX < 0x02070000 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #elif !defined(CYTHON_USE_PYLONG_INTERNALS) - #define CYTHON_USE_PYLONG_INTERNALS 1 - #endif - #ifndef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 1 - #endif - #ifndef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 1 - #endif - #if PY_VERSION_HEX < 0x030300F0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #elif !defined(CYTHON_USE_UNICODE_WRITER) - #define CYTHON_USE_UNICODE_WRITER 1 - #endif - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #ifndef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 1 - #endif - #ifndef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 1 - #endif - #ifndef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 1 - #endif - #ifndef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 1 - #endif - #ifndef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) - #endif - #ifndef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) - #endif - #ifndef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) - #endif - #ifndef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) - #endif -#endif -#if !defined(CYTHON_FAST_PYCCALL) -#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) -#endif -#if CYTHON_USE_PYLONG_INTERNALS - #include "longintrepr.h" - #undef 
SHIFT - #undef BASE - #undef MASK - #ifdef SIZEOF_VOID_P - enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; - #endif -#endif -#ifndef __has_attribute - #define __has_attribute(x) 0 -#endif -#ifndef __has_cpp_attribute - #define __has_cpp_attribute(x) 0 -#endif -#ifndef CYTHON_RESTRICT - #if defined(__GNUC__) - #define CYTHON_RESTRICT __restrict__ - #elif defined(_MSC_VER) && _MSC_VER >= 1400 - #define CYTHON_RESTRICT __restrict - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_RESTRICT restrict - #else - #define CYTHON_RESTRICT - #endif -#endif -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -#endif -#ifndef CYTHON_MAYBE_UNUSED_VAR -# if defined(__cplusplus) - template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } -# else -# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) -# endif -#endif -#ifndef CYTHON_NCP_UNUSED -# if CYTHON_COMPILING_IN_CPYTHON -# define CYTHON_NCP_UNUSED -# else -# define CYTHON_NCP_UNUSED CYTHON_UNUSED -# endif -#endif -#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) -#ifdef _MSC_VER - #ifndef _MSC_STDINT_H_ - #if _MSC_VER < 1300 - typedef unsigned char uint8_t; - typedef unsigned int uint32_t; - #else - typedef unsigned __int8 uint8_t; - typedef unsigned __int32 uint32_t; - #endif - #endif -#else - #include -#endif -#ifndef CYTHON_FALLTHROUGH - #if defined(__cplusplus) && __cplusplus >= 201103L - #if __has_cpp_attribute(fallthrough) - #define CYTHON_FALLTHROUGH [[fallthrough]] - #elif __has_cpp_attribute(clang::fallthrough) - #define CYTHON_FALLTHROUGH [[clang::fallthrough]] - #elif 
__has_cpp_attribute(gnu::fallthrough) - #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] - #endif - #endif - #ifndef CYTHON_FALLTHROUGH - #if __has_attribute(fallthrough) - #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) - #else - #define CYTHON_FALLTHROUGH - #endif - #endif - #if defined(__clang__ ) && defined(__apple_build_version__) - #if __apple_build_version__ < 7000000 - #undef CYTHON_FALLTHROUGH - #define CYTHON_FALLTHROUGH - #endif - #endif -#endif - -#ifndef __cplusplus - #error "Cython files generated with the C++ option must be compiled with a C++ compiler." -#endif -#ifndef CYTHON_INLINE - #if defined(__clang__) - #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) - #else - #define CYTHON_INLINE inline - #endif -#endif -template -void __Pyx_call_destructor(T& x) { - x.~T(); -} -template -class __Pyx_FakeReference { - public: - __Pyx_FakeReference() : ptr(NULL) { } - __Pyx_FakeReference(const T& ref) : ptr(const_cast(&ref)) { } - T *operator->() { return ptr; } - T *operator&() { return ptr; } - operator T&() { return *ptr; } - template bool operator ==(U other) { return *ptr == other; } - template bool operator !=(U other) { return *ptr != other; } - private: - T *ptr; -}; - -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) - #define Py_OptimizeFlag 0 -#endif -#define __PYX_BUILD_PY_SSIZE_T "n" -#define CYTHON_FORMAT_SSIZE_T "z" -#if PY_MAJOR_VERSION < 3 - #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" - #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) - #define __Pyx_DefaultClassType PyClass_Type -#else - #define __Pyx_BUILTIN_MODULE_NAME "builtins" -#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 - #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) 
-#else - #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#endif - #define __Pyx_DefaultClassType PyType_Type -#endif -#ifndef Py_TPFLAGS_CHECKTYPES - #define Py_TPFLAGS_CHECKTYPES 0 -#endif -#ifndef Py_TPFLAGS_HAVE_INDEX - #define Py_TPFLAGS_HAVE_INDEX 0 -#endif -#ifndef Py_TPFLAGS_HAVE_NEWBUFFER - #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif -#ifndef Py_TPFLAGS_HAVE_FINALIZE - #define Py_TPFLAGS_HAVE_FINALIZE 0 -#endif -#ifndef METH_STACKLESS - #define METH_STACKLESS 0 -#endif -#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) - #ifndef METH_FASTCALL - #define METH_FASTCALL 0x80 - #endif - typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); - typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, - Py_ssize_t nargs, PyObject *kwnames); -#else - #define __Pyx_PyCFunctionFast _PyCFunctionFast - #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords -#endif -#if CYTHON_FAST_PYCCALL -#define __Pyx_PyFastCFunction_Check(func)\ - ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) -#else -#define __Pyx_PyFastCFunction_Check(func) 0 -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) - #define PyObject_Malloc(s) PyMem_Malloc(s) - #define PyObject_Free(p) PyMem_Free(p) - #define PyObject_Realloc(p) PyMem_Realloc(p) -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 - #define PyMem_RawMalloc(n) PyMem_Malloc(n) - #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) - #define PyMem_RawFree(p) PyMem_Free(p) -#endif -#if CYTHON_COMPILING_IN_PYSTON - #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) -#else - 
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) -#endif -#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 - #define __Pyx_PyThreadState_Current PyThreadState_GET() -#elif PY_VERSION_HEX >= 0x03060000 - #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() -#elif PY_VERSION_HEX >= 0x03000000 - #define __Pyx_PyThreadState_Current PyThreadState_GET() -#else - #define __Pyx_PyThreadState_Current _PyThreadState_Current -#endif -#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) -#include "pythread.h" -#define Py_tss_NEEDS_INIT 0 -typedef int Py_tss_t; -static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { - *key = PyThread_create_key(); - return 0; -} -static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { - Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); - *key = Py_tss_NEEDS_INIT; - return key; -} -static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { - PyObject_Free(key); -} -static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { - return *key != Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { - PyThread_delete_key(*key); - *key = Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { - return PyThread_set_key_value(*key, value); -} -static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { - return PyThread_get_key_value(*key); -} -#endif -#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) -#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) -#else -#define __Pyx_PyDict_NewPresized(n) PyDict_New() -#endif -#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS -#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) -#else -#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) -#endif -#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) - #define CYTHON_PEP393_ENABLED 1 - #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ - 0 : _PyUnicode_Ready((PyObject *)(op))) - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) - #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) - #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) - #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) -#else - #define CYTHON_PEP393_ENABLED 0 - #define PyUnicode_1BYTE_KIND 1 - #define PyUnicode_2BYTE_KIND 2 - #define PyUnicode_4BYTE_KIND 4 - #define __Pyx_PyUnicode_READY(op) (0) - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) - #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) - #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) - #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) -#endif -#if CYTHON_COMPILING_IN_PYPY - #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) -#else - #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ - PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) - #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) - #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) - #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) -#endif -#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) -#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) -#else - #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) -#endif -#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) - #define PyObject_ASCII(o) PyObject_Repr(o) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact - #define PyObject_Unicode PyObject_Str -#endif -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) - #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) -#else - #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) - #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) -#endif -#ifndef PySet_CheckExact - #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) -#endif -#if CYTHON_ASSUME_SAFE_MACROS - #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) -#else - #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask - #define PyNumber_Int PyNumber_Long -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject 
-#endif -#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY - #ifndef PyUnicode_InternFromString - #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) - #endif -#endif -#if PY_VERSION_HEX < 0x030200A4 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t PyInt_AsLong -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t -#endif -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func)) -#else - #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) -#endif -#if CYTHON_USE_ASYNC_SLOTS - #if PY_VERSION_HEX >= 0x030500B1 - #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods - #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) - #else - #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) - #endif -#else - #define __Pyx_PyType_AsAsync(obj) NULL -#endif -#ifndef __Pyx_PyAsyncMethodsStruct - typedef struct { - unaryfunc am_await; - unaryfunc am_aiter; - unaryfunc am_anext; - } __Pyx_PyAsyncMethodsStruct; -#endif - -#if defined(WIN32) || defined(MS_WINDOWS) - #define _USE_MATH_DEFINES -#endif -#include -#ifdef NAN -#define __PYX_NAN() ((float) NAN) -#else -static CYTHON_INLINE float __PYX_NAN() { - float value; - memset(&value, 0xFF, sizeof(value)); - return value; -} -#endif -#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) -#define __Pyx_truncl trunc -#else -#define __Pyx_truncl truncl -#endif - - -#define __PYX_ERR(f_index, lineno, Ln_error) \ -{ \ - __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ -} - -#ifndef __PYX_EXTERN_C - #ifdef __cplusplus - #define __PYX_EXTERN_C extern "C" - #else - #define __PYX_EXTERN_C extern - #endif -#endif - -#define __PYX_HAVE__cudf___cuda__gpu -#define __PYX_HAVE_API__cudf___cuda__gpu -/* Early includes */ -#include 
"cuda.h" -#include "cuda_runtime_api.h" -#ifdef _OPENMP -#include -#endif /* _OPENMP */ - -#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) -#define CYTHON_WITHOUT_ASSERTIONS -#endif - -typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; - const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; - -#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 -#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 -#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) -#define __PYX_DEFAULT_STRING_ENCODING "" -#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString -#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#define __Pyx_uchar_cast(c) ((unsigned char)c) -#define __Pyx_long_cast(x) ((long)x) -#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ - (sizeof(type) < sizeof(Py_ssize_t)) ||\ - (sizeof(type) > sizeof(Py_ssize_t) &&\ - likely(v < (type)PY_SSIZE_T_MAX ||\ - v == (type)PY_SSIZE_T_MAX) &&\ - (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ - v == (type)PY_SSIZE_T_MIN))) ||\ - (sizeof(type) == sizeof(Py_ssize_t) &&\ - (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ - v == (type)PY_SSIZE_T_MAX))) ) -static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { - return (size_t) i < (size_t) limit; -} -#if defined (__cplusplus) && __cplusplus >= 201103L - #include - #define __Pyx_sst_abs(value) std::abs(value) -#elif SIZEOF_INT >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) abs(value) -#elif SIZEOF_LONG >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) labs(value) -#elif defined (_MSC_VER) - #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) -#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define __Pyx_sst_abs(value) llabs(value) -#elif defined (__GNUC__) - #define __Pyx_sst_abs(value) __builtin_llabs(value) -#else - #define __Pyx_sst_abs(value) 
((value<0) ? -value : value) -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); -#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) -#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) -#define __Pyx_PyBytes_FromString PyBytes_FromString -#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); -#if PY_MAJOR_VERSION < 3 - #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#else - #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize -#endif -#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) -#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) -#define 
__Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) -#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) -#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) -static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { - const Py_UNICODE *u_end = u; - while (*u_end++) ; - return (size_t)(u_end - u - 1); -} -#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) -#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode -#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode -#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) -#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); -#define __Pyx_PySequence_Tuple(obj)\ - (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -#if CYTHON_ASSUME_SAFE_MACROS -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) -#else -#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) -#endif -#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) -#if PY_MAJOR_VERSION >= 3 -#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) -#else -#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) -#endif -#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Float(x)) -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII -static int __Pyx_sys_getdefaultencoding_not_ascii; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - PyObject* ascii_chars_u = NULL; - PyObject* ascii_chars_b = NULL; - const char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - if (strcmp(default_encoding_c, "ascii") == 0) { - __Pyx_sys_getdefaultencoding_not_ascii = 0; - } else { - char ascii_chars[128]; - int c; - for (c = 0; c < 128; c++) { - ascii_chars[c] = c; - } - __Pyx_sys_getdefaultencoding_not_ascii = 1; - ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); - if (!ascii_chars_u) goto bad; - ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); - if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { - PyErr_Format( - PyExc_ValueError, - "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", - default_encoding_c); - goto bad; - } - Py_DECREF(ascii_chars_u); - Py_DECREF(ascii_chars_b); - } - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - Py_XDECREF(ascii_chars_u); - Py_XDECREF(ascii_chars_b); - return -1; -} -#endif -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) -#else -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -static char* 
__PYX_DEFAULT_STRING_ENCODING; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); - if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; - strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - return -1; -} -#endif -#endif - - -/* Test for GCC > 2.95 */ -#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) -#else /* !__GNUC__ or GCC < 2.95 */ - #define likely(x) (x) - #define unlikely(x) (x) -#endif /* __GNUC__ */ -static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } - -static PyObject *__pyx_m = NULL; -static PyObject *__pyx_d; -static PyObject *__pyx_b; -static PyObject *__pyx_cython_runtime = NULL; -static PyObject *__pyx_empty_tuple; -static PyObject *__pyx_empty_bytes; -static PyObject *__pyx_empty_unicode; -static int __pyx_lineno; -static int __pyx_clineno = 0; -static const char * __pyx_cfilenm= __FILE__; -static const char *__pyx_filename; - - -static const char *__pyx_f[] = { - "cudf/_cuda/gpu.pyx", -}; - -/* "cudf/_cuda/gpu.pxd":113 - * int cudaDeviceGetAttribute(int* value, cudaDeviceAttr attr, int device) - * - * ctypedef int underlying_type_attribute # <<<<<<<<<<<<<< - */ -typedef int __pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute; - -/*--- Type declarations ---*/ - -/* --- Runtime support code (head) --- */ -/* Refnanny.proto */ -#ifndef 
CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, int); - void (*DECREF)(void*, PyObject*, int); - void (*GOTREF)(void*, PyObject*, int); - void (*GIVEREF)(void*, PyObject*, int); - void* (*SetupContext)(const char*, int, const char*); - void (*FinishContext)(void**); - } __Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; -#ifdef WITH_THREAD - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - if (acquire_gil) {\ - PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ - PyGILState_Release(__pyx_gilstate_save);\ - } else {\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ - } -#else - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) -#endif - #define __Pyx_RefNannyFinishContext()\ - __Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) - #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) - #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) -#else - #define __Pyx_RefNannyDeclarations - #define __Pyx_RefNannySetupContext(name, acquire_gil) - #define 
__Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define __Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define __Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif -#define __Pyx_XDECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_XDECREF(tmp);\ - } while (0) -#define __Pyx_DECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_DECREF(tmp);\ - } while (0) -#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) -#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) - -/* RaiseArgTupleInvalid.proto */ -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); - -/* RaiseDoubleKeywords.proto */ -static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); - -/* ParseKeywords.proto */ -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ - PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ - const char* function_name); - -/* PyObjectGetAttrStr.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) -#endif - -/* Import.proto */ -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); - -/* ImportFrom.proto */ -static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); - -/* GetBuiltinName.proto */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name); - -/* PyDictVersioning.proto */ -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) -#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) 
-#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ - (version_var) = __PYX_GET_DICT_VERSION(dict);\ - (cache_var) = (value); -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ - (VAR) = __pyx_dict_cached_value;\ - } else {\ - (VAR) = __pyx_dict_cached_value = (LOOKUP);\ - __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ - }\ -} -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); -#else -#define __PYX_GET_DICT_VERSION(dict) (0) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); -#endif - -/* GetModuleGlobalName.proto */ -#if CYTHON_USE_DICT_VERSIONS -#define __Pyx_GetModuleGlobalName(var, name) {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ - (likely(__pyx_dict_cached_value) ? 
__Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ - __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} -#define __Pyx_GetModuleGlobalNameUncached(var, name) {\ - PY_UINT64_T __pyx_dict_version;\ - PyObject *__pyx_dict_cached_value;\ - (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); -#else -#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) -#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); -#endif - -/* CalculateMetaclass.proto */ -static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases); - -/* SetNameInClass.proto */ -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 -#define __Pyx_SetNameInClass(ns, name, value)\ - (likely(PyDict_CheckExact(ns)) ? _PyDict_SetItem_KnownHash(ns, name, value, ((PyASCIIObject *) name)->hash) : PyObject_SetItem(ns, name, value)) -#elif CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_SetNameInClass(ns, name, value)\ - (likely(PyDict_CheckExact(ns)) ? 
PyDict_SetItem(ns, name, value) : PyObject_SetItem(ns, name, value)) -#else -#define __Pyx_SetNameInClass(ns, name, value) PyObject_SetItem(ns, name, value) -#endif - -/* Py3ClassCreate.proto */ -static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *qualname, - PyObject *mkw, PyObject *modname, PyObject *doc); -static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, PyObject *dict, - PyObject *mkw, int calculate_metaclass, int allow_py2_metaclass); - -/* PyThreadStateGet.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; -#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; -#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type -#else -#define __Pyx_PyThreadState_declare -#define __Pyx_PyThreadState_assign -#define __Pyx_PyErr_Occurred() PyErr_Occurred() -#endif - -/* PyErrFetchRestore.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) -#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) -#else -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#endif -#else -#define __Pyx_PyErr_Clear() PyErr_Clear() -#define 
__Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) -#endif - -/* CLineInTraceback.proto */ -#ifdef CYTHON_CLINE_IN_TRACEBACK -#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) -#else -static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); -#endif - -/* CodeObjectCache.proto */ -typedef struct { - PyCodeObject* code_object; - int code_line; -} __Pyx_CodeObjectCacheEntry; -struct __Pyx_CodeObjectCache { - int count; - int max_count; - __Pyx_CodeObjectCacheEntry* entries; -}; -static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; -static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); -static PyCodeObject *__pyx_find_code_object(int code_line); -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); - -/* AddTraceback.proto */ -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename); - -/* CIntToPy.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); - -/* CIntFromPy.proto */ -static CYTHON_INLINE enum cudaDeviceAttr __Pyx_PyInt_As_enum__cudaDeviceAttr(PyObject *); - -/* CIntFromPy.proto */ -static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); - -/* CIntToPy.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); - -/* CIntFromPy.proto */ -static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); - -/* FastTypeChecks.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -#define 
__Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); -#else -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) -#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) -#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) -#endif -#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) - -/* CheckBinaryVersion.proto */ -static int __Pyx_check_binary_version(void); - -/* InitStrings.proto */ -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); - - -/* Module declarations from 'cudf._cuda.gpu' */ -static int __pyx_f_4cudf_5_cuda_3gpu_driverGetVersion(int __pyx_skip_dispatch); /*proto*/ -static int __pyx_f_4cudf_5_cuda_3gpu_runtimeGetVersion(int __pyx_skip_dispatch); /*proto*/ -static int __pyx_f_4cudf_5_cuda_3gpu_getDeviceCount(int __pyx_skip_dispatch); /*proto*/ -static int __pyx_f_4cudf_5_cuda_3gpu_getDeviceAttribute(PyObject *, PyObject *, int __pyx_skip_dispatch); /*proto*/ -#define __Pyx_MODULE_NAME "cudf._cuda.gpu" -extern int __pyx_module_is_main_cudf___cuda__gpu; -int __pyx_module_is_main_cudf___cuda__gpu = 0; - -/* Implementation of 'cudf._cuda.gpu' */ -static const char __pyx_k_doc[] = "__doc__"; -static const char __pyx_k_attr[] = "attr"; -static const char __pyx_k_enum[] = "enum"; -static const char __pyx_k_main[] = "__main__"; -static const char __pyx_k_name[] = "__name__"; -static const char __pyx_k_test[] = "__test__"; -static const char __pyx_k_device[] = "device"; -static const char __pyx_k_import[] = "__import__"; -static const char __pyx_k_module[] = "__module__"; 
-static const char __pyx_k_IntEnum[] = "IntEnum"; -static const char __pyx_k_prepare[] = "__prepare__"; -static const char __pyx_k_qualname[] = "__qualname__"; -static const char __pyx_k_metaclass[] = "__metaclass__"; -static const char __pyx_k_CudaDeviceAttr[] = "CudaDeviceAttr"; -static const char __pyx_k_cudf__cuda_gpu[] = "cudf._cuda.gpu"; -static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; -static const char __pyx_k_cudaDevAttrMaxPitch[] = "cudaDevAttrMaxPitch"; -static const char __pyx_k_cudaDevAttrPciBusId[] = "cudaDevAttrPciBusId"; -static const char __pyx_k_cudaDevAttrWarpSize[] = "cudaDevAttrWarpSize"; -static const char __pyx_k_cudaDevAttrClockRate[] = "cudaDevAttrClockRate"; -static const char __pyx_k_cudaDevAttrTccDriver[] = "cudaDevAttrTccDriver"; -static const char __pyx_k_cudaDevAttrEccEnabled[] = "cudaDevAttrEccEnabled"; -static const char __pyx_k_cudaDevAttrGpuOverlap[] = "cudaDevAttrGpuOverlap"; -static const char __pyx_k_cudaDevAttrIntegrated[] = "cudaDevAttrIntegrated"; -static const char __pyx_k_cudaDevAttrReserved92[] = "cudaDevAttrReserved92"; -static const char __pyx_k_cudaDevAttrReserved93[] = "cudaDevAttrReserved93"; -static const char __pyx_k_cudaDevAttrReserved94[] = "cudaDevAttrReserved94"; -static const char __pyx_k_cudaDevAttrComputeMode[] = "cudaDevAttrComputeMode"; -static const char __pyx_k_cudaDevAttrL2CacheSize[] = "cudaDevAttrL2CacheSize"; -static const char __pyx_k_cudaDevAttrMaxGridDimX[] = "cudaDevAttrMaxGridDimX"; -static const char __pyx_k_cudaDevAttrMaxGridDimY[] = "cudaDevAttrMaxGridDimY"; -static const char __pyx_k_cudaDevAttrMaxGridDimZ[] = "cudaDevAttrMaxGridDimZ"; -static const char __pyx_k_cudaDevAttrPciDeviceId[] = "cudaDevAttrPciDeviceId"; -static const char __pyx_k_cudaDevAttrPciDomainId[] = "cudaDevAttrPciDomainId"; -static const char __pyx_k_cudaDevAttrMaxBlockDimX[] = "cudaDevAttrMaxBlockDimX"; -static const char __pyx_k_cudaDevAttrMaxBlockDimY[] = "cudaDevAttrMaxBlockDimY"; -static const 
char __pyx_k_cudaDevAttrMaxBlockDimZ[] = "cudaDevAttrMaxBlockDimZ"; -static const char __pyx_k_cudaDevAttrManagedMemory[] = "cudaDevAttrManagedMemory"; -static const char __pyx_k_cudaDevAttrIsMultiGpuBoard[] = "cudaDevAttrIsMultiGpuBoard"; -static const char __pyx_k_cudaDevAttrMemoryClockRate[] = "cudaDevAttrMemoryClockRate"; -static const char __pyx_k_cudaDevAttrAsyncEngineCount[] = "cudaDevAttrAsyncEngineCount"; -static const char __pyx_k_cudaDevAttrCanMapHostMemory[] = "cudaDevAttrCanMapHostMemory"; -static const char __pyx_k_cudaDevAttrSurfaceAlignment[] = "cudaDevAttrSurfaceAlignment"; -static const char __pyx_k_cudaDevAttrTextureAlignment[] = "cudaDevAttrTextureAlignment"; -static const char __pyx_k_cudaDevAttrConcurrentKernels[] = "cudaDevAttrConcurrentKernels"; -static const char __pyx_k_cudaDevAttrCooperativeLaunch[] = "cudaDevAttrCooperativeLaunch"; -static const char __pyx_k_cudaDevAttrKernelExecTimeout[] = "cudaDevAttrKernelExecTimeout"; -static const char __pyx_k_cudaDevAttrMaxSurface1DWidth[] = "cudaDevAttrMaxSurface1DWidth"; -static const char __pyx_k_cudaDevAttrMaxSurface2DWidth[] = "cudaDevAttrMaxSurface2DWidth"; -static const char __pyx_k_cudaDevAttrMaxSurface3DDepth[] = "cudaDevAttrMaxSurface3DDepth"; -static const char __pyx_k_cudaDevAttrMaxSurface3DWidth[] = "cudaDevAttrMaxSurface3DWidth"; -static const char __pyx_k_cudaDevAttrMaxTexture1DWidth[] = "cudaDevAttrMaxTexture1DWidth"; -static const char __pyx_k_cudaDevAttrMaxTexture2DWidth[] = "cudaDevAttrMaxTexture2DWidth"; -static const char __pyx_k_cudaDevAttrMaxTexture3DDepth[] = "cudaDevAttrMaxTexture3DDepth"; -static const char __pyx_k_cudaDevAttrMaxTexture3DWidth[] = "cudaDevAttrMaxTexture3DWidth"; -static const char __pyx_k_cudaDevAttrUnifiedAddressing[] = "cudaDevAttrUnifiedAddressing"; -static const char __pyx_k_cudaDevAttrMaxSurface2DHeight[] = "cudaDevAttrMaxSurface2DHeight"; -static const char __pyx_k_cudaDevAttrMaxSurface3DHeight[] = "cudaDevAttrMaxSurface3DHeight"; -static const char 
__pyx_k_cudaDevAttrMaxTexture2DHeight[] = "cudaDevAttrMaxTexture2DHeight"; -static const char __pyx_k_cudaDevAttrMaxTexture3DHeight[] = "cudaDevAttrMaxTexture3DHeight"; -static const char __pyx_k_cudaDevAttrMaxThreadsPerBlock[] = "cudaDevAttrMaxThreadsPerBlock"; -static const char __pyx_k_cudaDevAttrMultiProcessorCount[] = "cudaDevAttrMultiProcessorCount"; -static const char __pyx_k_cudaDevAttrTotalConstantMemory[] = "cudaDevAttrTotalConstantMemory"; -static const char __pyx_k_cudaDevAttrCanFlushRemoteWrites[] = "cudaDevAttrCanFlushRemoteWrites"; -static const char __pyx_k_cudaDevAttrGlobalMemoryBusWidth[] = "cudaDevAttrGlobalMemoryBusWidth"; -static const char __pyx_k_cudaDevAttrMaxRegistersPerBlock[] = "cudaDevAttrMaxRegistersPerBlock"; -static const char __pyx_k_cudaDevAttrMaxTexture3DDepthAlt[] = "cudaDevAttrMaxTexture3DDepthAlt"; -static const char __pyx_k_cudaDevAttrMaxTexture3DWidthAlt[] = "cudaDevAttrMaxTexture3DWidthAlt"; -static const char __pyx_k_cudaDevAttrMultiGpuBoardGroupID[] = "cudaDevAttrMultiGpuBoardGroupID"; -static const char __pyx_k_cudaDevAttrPageableMemoryAccess[] = "cudaDevAttrPageableMemoryAccess"; -static const char __pyx_k_cudaDevAttrCanUseHostPointerForR[] = "cudaDevAttrCanUseHostPointerForRegisteredMem"; -static const char __pyx_k_cudaDevAttrComputeCapabilityMajo[] = "cudaDevAttrComputeCapabilityMajor"; -static const char __pyx_k_cudaDevAttrComputeCapabilityMino[] = "cudaDevAttrComputeCapabilityMinor"; -static const char __pyx_k_cudaDevAttrComputePreemptionSupp[] = "cudaDevAttrComputePreemptionSupported"; -static const char __pyx_k_cudaDevAttrConcurrentManagedAcce[] = "cudaDevAttrConcurrentManagedAccess"; -static const char __pyx_k_cudaDevAttrCooperativeMultiDevic[] = "cudaDevAttrCooperativeMultiDeviceLaunch"; -static const char __pyx_k_cudaDevAttrDirectManagedMemAcces[] = "cudaDevAttrDirectManagedMemAccessFromHost"; -static const char __pyx_k_cudaDevAttrGlobalL1CacheSupporte[] = "cudaDevAttrGlobalL1CacheSupported"; -static const char 
__pyx_k_cudaDevAttrHostNativeAtomicSuppo[] = "cudaDevAttrHostNativeAtomicSupported"; -static const char __pyx_k_cudaDevAttrHostRegisterSupported[] = "cudaDevAttrHostRegisterSupported"; -static const char __pyx_k_cudaDevAttrLocalL1CacheSupported[] = "cudaDevAttrLocalL1CacheSupported"; -static const char __pyx_k_cudaDevAttrMaxRegistersPerMultip[] = "cudaDevAttrMaxRegistersPerMultiprocessor"; -static const char __pyx_k_cudaDevAttrMaxSharedMemoryPerBlo[] = "cudaDevAttrMaxSharedMemoryPerBlock"; -static const char __pyx_k_cudaDevAttrMaxSharedMemoryPerMul[] = "cudaDevAttrMaxSharedMemoryPerMultiprocessor"; -static const char __pyx_k_cudaDevAttrMaxSurface1DLayeredLa[] = "cudaDevAttrMaxSurface1DLayeredLayers"; -static const char __pyx_k_cudaDevAttrMaxSurface1DLayeredWi[] = "cudaDevAttrMaxSurface1DLayeredWidth"; -static const char __pyx_k_cudaDevAttrMaxSurface2DLayeredHe[] = "cudaDevAttrMaxSurface2DLayeredHeight"; -static const char __pyx_k_cudaDevAttrMaxSurface2DLayeredLa[] = "cudaDevAttrMaxSurface2DLayeredLayers"; -static const char __pyx_k_cudaDevAttrMaxSurface2DLayeredWi[] = "cudaDevAttrMaxSurface2DLayeredWidth"; -static const char __pyx_k_cudaDevAttrMaxSurfaceCubemapLaye[] = "cudaDevAttrMaxSurfaceCubemapLayeredWidth"; -static const char __pyx_k_cudaDevAttrMaxSurfaceCubemapWidt[] = "cudaDevAttrMaxSurfaceCubemapWidth"; -static const char __pyx_k_cudaDevAttrMaxTexture1DLayeredLa[] = "cudaDevAttrMaxTexture1DLayeredLayers"; -static const char __pyx_k_cudaDevAttrMaxTexture1DLayeredWi[] = "cudaDevAttrMaxTexture1DLayeredWidth"; -static const char __pyx_k_cudaDevAttrMaxTexture1DLinearWid[] = "cudaDevAttrMaxTexture1DLinearWidth"; -static const char __pyx_k_cudaDevAttrMaxTexture1DMipmapped[] = "cudaDevAttrMaxTexture1DMipmappedWidth"; -static const char __pyx_k_cudaDevAttrMaxTexture2DGatherHei[] = "cudaDevAttrMaxTexture2DGatherHeight"; -static const char __pyx_k_cudaDevAttrMaxTexture2DGatherWid[] = "cudaDevAttrMaxTexture2DGatherWidth"; -static const char 
__pyx_k_cudaDevAttrMaxTexture2DLayeredHe[] = "cudaDevAttrMaxTexture2DLayeredHeight"; -static const char __pyx_k_cudaDevAttrMaxTexture2DLayeredLa[] = "cudaDevAttrMaxTexture2DLayeredLayers"; -static const char __pyx_k_cudaDevAttrMaxTexture2DLayeredWi[] = "cudaDevAttrMaxTexture2DLayeredWidth"; -static const char __pyx_k_cudaDevAttrMaxTexture2DLinearHei[] = "cudaDevAttrMaxTexture2DLinearHeight"; -static const char __pyx_k_cudaDevAttrMaxTexture2DLinearPit[] = "cudaDevAttrMaxTexture2DLinearPitch"; -static const char __pyx_k_cudaDevAttrMaxTexture2DLinearWid[] = "cudaDevAttrMaxTexture2DLinearWidth"; -static const char __pyx_k_cudaDevAttrMaxTexture2DMipmapped[] = "cudaDevAttrMaxTexture2DMipmappedWidth"; -static const char __pyx_k_cudaDevAttrMaxTexture3DHeightAlt[] = "cudaDevAttrMaxTexture3DHeightAlt"; -static const char __pyx_k_cudaDevAttrMaxTextureCubemapLaye[] = "cudaDevAttrMaxTextureCubemapLayeredWidth"; -static const char __pyx_k_cudaDevAttrMaxTextureCubemapWidt[] = "cudaDevAttrMaxTextureCubemapWidth"; -static const char __pyx_k_cudaDevAttrMaxThreadsPerMultiPro[] = "cudaDevAttrMaxThreadsPerMultiProcessor"; -static const char __pyx_k_cudaDevAttrPageableMemoryAccessU[] = "cudaDevAttrPageableMemoryAccessUsesHostPageTables"; -static const char __pyx_k_cudaDevAttrSingleToDoublePrecisi[] = "cudaDevAttrSingleToDoublePrecisionPerfRatio"; -static const char __pyx_k_cudaDevAttrStreamPrioritiesSuppo[] = "cudaDevAttrStreamPrioritiesSupported"; -static const char __pyx_k_cudaDevAttrTexturePitchAlignment[] = "cudaDevAttrTexturePitchAlignment"; -static const char __pyx_k_cudaDevAttrMaxSharedMemoryPerBlo_2[] = "cudaDevAttrMaxSharedMemoryPerBlockOptin"; -static const char __pyx_k_cudaDevAttrMaxSurfaceCubemapLaye_2[] = "cudaDevAttrMaxSurfaceCubemapLayeredLayers"; -static const char __pyx_k_cudaDevAttrMaxTexture2DMipmapped_2[] = "cudaDevAttrMaxTexture2DMipmappedHeight"; -static const char __pyx_k_cudaDevAttrMaxTextureCubemapLaye_2[] = "cudaDevAttrMaxTextureCubemapLayeredLayers"; -static 
PyObject *__pyx_n_s_CudaDeviceAttr; -static PyObject *__pyx_n_s_IntEnum; -static PyObject *__pyx_n_s_attr; -static PyObject *__pyx_n_s_cline_in_traceback; -static PyObject *__pyx_n_s_cudaDevAttrAsyncEngineCount; -static PyObject *__pyx_n_s_cudaDevAttrCanFlushRemoteWrites; -static PyObject *__pyx_n_s_cudaDevAttrCanMapHostMemory; -static PyObject *__pyx_n_s_cudaDevAttrCanUseHostPointerForR; -static PyObject *__pyx_n_s_cudaDevAttrClockRate; -static PyObject *__pyx_n_s_cudaDevAttrComputeCapabilityMajo; -static PyObject *__pyx_n_s_cudaDevAttrComputeCapabilityMino; -static PyObject *__pyx_n_s_cudaDevAttrComputeMode; -static PyObject *__pyx_n_s_cudaDevAttrComputePreemptionSupp; -static PyObject *__pyx_n_s_cudaDevAttrConcurrentKernels; -static PyObject *__pyx_n_s_cudaDevAttrConcurrentManagedAcce; -static PyObject *__pyx_n_s_cudaDevAttrCooperativeLaunch; -static PyObject *__pyx_n_s_cudaDevAttrCooperativeMultiDevic; -static PyObject *__pyx_n_s_cudaDevAttrDirectManagedMemAcces; -static PyObject *__pyx_n_s_cudaDevAttrEccEnabled; -static PyObject *__pyx_n_s_cudaDevAttrGlobalL1CacheSupporte; -static PyObject *__pyx_n_s_cudaDevAttrGlobalMemoryBusWidth; -static PyObject *__pyx_n_s_cudaDevAttrGpuOverlap; -static PyObject *__pyx_n_s_cudaDevAttrHostNativeAtomicSuppo; -static PyObject *__pyx_n_s_cudaDevAttrHostRegisterSupported; -static PyObject *__pyx_n_s_cudaDevAttrIntegrated; -static PyObject *__pyx_n_s_cudaDevAttrIsMultiGpuBoard; -static PyObject *__pyx_n_s_cudaDevAttrKernelExecTimeout; -static PyObject *__pyx_n_s_cudaDevAttrL2CacheSize; -static PyObject *__pyx_n_s_cudaDevAttrLocalL1CacheSupported; -static PyObject *__pyx_n_s_cudaDevAttrManagedMemory; -static PyObject *__pyx_n_s_cudaDevAttrMaxBlockDimX; -static PyObject *__pyx_n_s_cudaDevAttrMaxBlockDimY; -static PyObject *__pyx_n_s_cudaDevAttrMaxBlockDimZ; -static PyObject *__pyx_n_s_cudaDevAttrMaxGridDimX; -static PyObject *__pyx_n_s_cudaDevAttrMaxGridDimY; -static PyObject *__pyx_n_s_cudaDevAttrMaxGridDimZ; -static PyObject 
*__pyx_n_s_cudaDevAttrMaxPitch; -static PyObject *__pyx_n_s_cudaDevAttrMaxRegistersPerBlock; -static PyObject *__pyx_n_s_cudaDevAttrMaxRegistersPerMultip; -static PyObject *__pyx_n_s_cudaDevAttrMaxSharedMemoryPerBlo; -static PyObject *__pyx_n_s_cudaDevAttrMaxSharedMemoryPerBlo_2; -static PyObject *__pyx_n_s_cudaDevAttrMaxSharedMemoryPerMul; -static PyObject *__pyx_n_s_cudaDevAttrMaxSurface1DLayeredLa; -static PyObject *__pyx_n_s_cudaDevAttrMaxSurface1DLayeredWi; -static PyObject *__pyx_n_s_cudaDevAttrMaxSurface1DWidth; -static PyObject *__pyx_n_s_cudaDevAttrMaxSurface2DHeight; -static PyObject *__pyx_n_s_cudaDevAttrMaxSurface2DLayeredHe; -static PyObject *__pyx_n_s_cudaDevAttrMaxSurface2DLayeredLa; -static PyObject *__pyx_n_s_cudaDevAttrMaxSurface2DLayeredWi; -static PyObject *__pyx_n_s_cudaDevAttrMaxSurface2DWidth; -static PyObject *__pyx_n_s_cudaDevAttrMaxSurface3DDepth; -static PyObject *__pyx_n_s_cudaDevAttrMaxSurface3DHeight; -static PyObject *__pyx_n_s_cudaDevAttrMaxSurface3DWidth; -static PyObject *__pyx_n_s_cudaDevAttrMaxSurfaceCubemapLaye; -static PyObject *__pyx_n_s_cudaDevAttrMaxSurfaceCubemapLaye_2; -static PyObject *__pyx_n_s_cudaDevAttrMaxSurfaceCubemapWidt; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture1DLayeredLa; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture1DLayeredWi; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture1DLinearWid; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture1DMipmapped; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture1DWidth; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DGatherHei; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DGatherWid; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DHeight; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DLayeredHe; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DLayeredLa; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DLayeredWi; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DLinearHei; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DLinearPit; -static 
PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DLinearWid; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DMipmapped; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DMipmapped_2; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture2DWidth; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture3DDepth; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture3DDepthAlt; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture3DHeight; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture3DHeightAlt; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture3DWidth; -static PyObject *__pyx_n_s_cudaDevAttrMaxTexture3DWidthAlt; -static PyObject *__pyx_n_s_cudaDevAttrMaxTextureCubemapLaye; -static PyObject *__pyx_n_s_cudaDevAttrMaxTextureCubemapLaye_2; -static PyObject *__pyx_n_s_cudaDevAttrMaxTextureCubemapWidt; -static PyObject *__pyx_n_s_cudaDevAttrMaxThreadsPerBlock; -static PyObject *__pyx_n_s_cudaDevAttrMaxThreadsPerMultiPro; -static PyObject *__pyx_n_s_cudaDevAttrMemoryClockRate; -static PyObject *__pyx_n_s_cudaDevAttrMultiGpuBoardGroupID; -static PyObject *__pyx_n_s_cudaDevAttrMultiProcessorCount; -static PyObject *__pyx_n_s_cudaDevAttrPageableMemoryAccess; -static PyObject *__pyx_n_s_cudaDevAttrPageableMemoryAccessU; -static PyObject *__pyx_n_s_cudaDevAttrPciBusId; -static PyObject *__pyx_n_s_cudaDevAttrPciDeviceId; -static PyObject *__pyx_n_s_cudaDevAttrPciDomainId; -static PyObject *__pyx_n_s_cudaDevAttrReserved92; -static PyObject *__pyx_n_s_cudaDevAttrReserved93; -static PyObject *__pyx_n_s_cudaDevAttrReserved94; -static PyObject *__pyx_n_s_cudaDevAttrSingleToDoublePrecisi; -static PyObject *__pyx_n_s_cudaDevAttrStreamPrioritiesSuppo; -static PyObject *__pyx_n_s_cudaDevAttrSurfaceAlignment; -static PyObject *__pyx_n_s_cudaDevAttrTccDriver; -static PyObject *__pyx_n_s_cudaDevAttrTextureAlignment; -static PyObject *__pyx_n_s_cudaDevAttrTexturePitchAlignment; -static PyObject *__pyx_n_s_cudaDevAttrTotalConstantMemory; -static PyObject *__pyx_n_s_cudaDevAttrUnifiedAddressing; -static PyObject 
*__pyx_n_s_cudaDevAttrWarpSize; -static PyObject *__pyx_n_s_cudf__cuda_gpu; -static PyObject *__pyx_n_s_device; -static PyObject *__pyx_n_s_doc; -static PyObject *__pyx_n_s_enum; -static PyObject *__pyx_n_s_import; -static PyObject *__pyx_n_s_main; -static PyObject *__pyx_n_s_metaclass; -static PyObject *__pyx_n_s_module; -static PyObject *__pyx_n_s_name; -static PyObject *__pyx_n_s_prepare; -static PyObject *__pyx_n_s_qualname; -static PyObject *__pyx_n_s_test; -static PyObject *__pyx_pf_4cudf_5_cuda_3gpu_driverGetVersion(CYTHON_UNUSED PyObject *__pyx_self); /* proto */ -static PyObject *__pyx_pf_4cudf_5_cuda_3gpu_2runtimeGetVersion(CYTHON_UNUSED PyObject *__pyx_self); /* proto */ -static PyObject *__pyx_pf_4cudf_5_cuda_3gpu_4getDeviceCount(CYTHON_UNUSED PyObject *__pyx_self); /* proto */ -static PyObject *__pyx_pf_4cudf_5_cuda_3gpu_6getDeviceAttribute(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_attr, PyObject *__pyx_v_device); /* proto */ -/* Late includes */ - -/* "cudf/_cuda/gpu.pyx":117 - * - * - * cpdef int driverGetVersion() except? -1: # <<<<<<<<<<<<<< - * cdef int version - * status = cudaDriverGetVersion(&version) - */ - -static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_1driverGetVersion(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static int __pyx_f_4cudf_5_cuda_3gpu_driverGetVersion(CYTHON_UNUSED int __pyx_skip_dispatch) { - int __pyx_v_version; - CYTHON_UNUSED int __pyx_v_status; - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("driverGetVersion", 0); - - /* "cudf/_cuda/gpu.pyx":119 - * cpdef int driverGetVersion() except? 
-1: - * cdef int version - * status = cudaDriverGetVersion(&version) # <<<<<<<<<<<<<< - * return version - * - */ - __pyx_v_status = cudaDriverGetVersion((&__pyx_v_version)); - - /* "cudf/_cuda/gpu.pyx":120 - * cdef int version - * status = cudaDriverGetVersion(&version) - * return version # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_version; - goto __pyx_L0; - - /* "cudf/_cuda/gpu.pyx":117 - * - * - * cpdef int driverGetVersion() except? -1: # <<<<<<<<<<<<<< - * cdef int version - * status = cudaDriverGetVersion(&version) - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* Python wrapper */ -static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_1driverGetVersion(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static char __pyx_doc_4cudf_5_cuda_3gpu_driverGetVersion[] = "driverGetVersion() -> int"; -static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_1driverGetVersion(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("driverGetVersion (wrapper)", 0); - __pyx_r = __pyx_pf_4cudf_5_cuda_3gpu_driverGetVersion(__pyx_self); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_4cudf_5_cuda_3gpu_driverGetVersion(CYTHON_UNUSED PyObject *__pyx_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - __Pyx_RefNannySetupContext("driverGetVersion", 0); - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __pyx_f_4cudf_5_cuda_3gpu_driverGetVersion(0); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 117, __pyx_L1_error) - __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 117, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - 
__Pyx_AddTraceback("cudf._cuda.gpu.driverGetVersion", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "cudf/_cuda/gpu.pyx":123 - * - * - * cpdef int runtimeGetVersion() except? -1: # <<<<<<<<<<<<<< - * cdef int version - * status = cudaRuntimeGetVersion(&version) - */ - -static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_3runtimeGetVersion(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static int __pyx_f_4cudf_5_cuda_3gpu_runtimeGetVersion(CYTHON_UNUSED int __pyx_skip_dispatch) { - int __pyx_v_version; - CYTHON_UNUSED int __pyx_v_status; - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("runtimeGetVersion", 0); - - /* "cudf/_cuda/gpu.pyx":125 - * cpdef int runtimeGetVersion() except? -1: - * cdef int version - * status = cudaRuntimeGetVersion(&version) # <<<<<<<<<<<<<< - * return version - * - */ - __pyx_v_status = cudaRuntimeGetVersion((&__pyx_v_version)); - - /* "cudf/_cuda/gpu.pyx":126 - * cdef int version - * status = cudaRuntimeGetVersion(&version) - * return version # <<<<<<<<<<<<<< - * - * cpdef int getDeviceCount() except? -1: - */ - __pyx_r = __pyx_v_version; - goto __pyx_L0; - - /* "cudf/_cuda/gpu.pyx":123 - * - * - * cpdef int runtimeGetVersion() except? 
-1: # <<<<<<<<<<<<<< - * cdef int version - * status = cudaRuntimeGetVersion(&version) - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* Python wrapper */ -static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_3runtimeGetVersion(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static char __pyx_doc_4cudf_5_cuda_3gpu_2runtimeGetVersion[] = "runtimeGetVersion() -> int"; -static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_3runtimeGetVersion(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("runtimeGetVersion (wrapper)", 0); - __pyx_r = __pyx_pf_4cudf_5_cuda_3gpu_2runtimeGetVersion(__pyx_self); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_4cudf_5_cuda_3gpu_2runtimeGetVersion(CYTHON_UNUSED PyObject *__pyx_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - __Pyx_RefNannySetupContext("runtimeGetVersion", 0); - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __pyx_f_4cudf_5_cuda_3gpu_runtimeGetVersion(0); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 123, __pyx_L1_error) - __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 123, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("cudf._cuda.gpu.runtimeGetVersion", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "cudf/_cuda/gpu.pyx":128 - * return version - * - * cpdef int getDeviceCount() except? 
-1: # <<<<<<<<<<<<<< - * cdef int count - * status = cudaGetDeviceCount(&count) - */ - -static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_5getDeviceCount(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static int __pyx_f_4cudf_5_cuda_3gpu_getDeviceCount(CYTHON_UNUSED int __pyx_skip_dispatch) { - int __pyx_v_count; - CYTHON_UNUSED int __pyx_v_status; - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("getDeviceCount", 0); - - /* "cudf/_cuda/gpu.pyx":130 - * cpdef int getDeviceCount() except? -1: - * cdef int count - * status = cudaGetDeviceCount(&count) # <<<<<<<<<<<<<< - * return count - * - */ - __pyx_v_status = cudaGetDeviceCount((&__pyx_v_count)); - - /* "cudf/_cuda/gpu.pyx":131 - * cdef int count - * status = cudaGetDeviceCount(&count) - * return count # <<<<<<<<<<<<<< - * - * cpdef int getDeviceAttribute(attr, device) except? -1: - */ - __pyx_r = __pyx_v_count; - goto __pyx_L0; - - /* "cudf/_cuda/gpu.pyx":128 - * return version - * - * cpdef int getDeviceCount() except? 
-1: # <<<<<<<<<<<<<< - * cdef int count - * status = cudaGetDeviceCount(&count) - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* Python wrapper */ -static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_5getDeviceCount(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static char __pyx_doc_4cudf_5_cuda_3gpu_4getDeviceCount[] = "getDeviceCount() -> int"; -static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_5getDeviceCount(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("getDeviceCount (wrapper)", 0); - __pyx_r = __pyx_pf_4cudf_5_cuda_3gpu_4getDeviceCount(__pyx_self); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_4cudf_5_cuda_3gpu_4getDeviceCount(CYTHON_UNUSED PyObject *__pyx_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - __Pyx_RefNannySetupContext("getDeviceCount", 0); - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __pyx_f_4cudf_5_cuda_3gpu_getDeviceCount(0); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 128, __pyx_L1_error) - __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 128, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("cudf._cuda.gpu.getDeviceCount", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "cudf/_cuda/gpu.pyx":133 - * return count - * - * cpdef int getDeviceAttribute(attr, device) except? 
-1: # <<<<<<<<<<<<<< - * cdef int value - * status = cudaDeviceGetAttribute(&value, attr, device) - */ - -static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_7getDeviceAttribute(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_f_4cudf_5_cuda_3gpu_getDeviceAttribute(PyObject *__pyx_v_attr, PyObject *__pyx_v_device, CYTHON_UNUSED int __pyx_skip_dispatch) { - int __pyx_v_value; - CYTHON_UNUSED int __pyx_v_status; - int __pyx_r; - __Pyx_RefNannyDeclarations - enum cudaDeviceAttr __pyx_t_1; - int __pyx_t_2; - __Pyx_RefNannySetupContext("getDeviceAttribute", 0); - - /* "cudf/_cuda/gpu.pyx":135 - * cpdef int getDeviceAttribute(attr, device) except? -1: - * cdef int value - * status = cudaDeviceGetAttribute(&value, attr, device) # <<<<<<<<<<<<<< - * return value - */ - __pyx_t_1 = ((enum cudaDeviceAttr)__Pyx_PyInt_As_enum__cudaDeviceAttr(__pyx_v_attr)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 135, __pyx_L1_error) - __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_v_device); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 135, __pyx_L1_error) - __pyx_v_status = cudaDeviceGetAttribute((&__pyx_v_value), __pyx_t_1, __pyx_t_2); - - /* "cudf/_cuda/gpu.pyx":136 - * cdef int value - * status = cudaDeviceGetAttribute(&value, attr, device) - * return value # <<<<<<<<<<<<<< - */ - __pyx_r = __pyx_v_value; - goto __pyx_L0; - - /* "cudf/_cuda/gpu.pyx":133 - * return count - * - * cpdef int getDeviceAttribute(attr, device) except? 
-1: # <<<<<<<<<<<<<< - * cdef int value - * status = cudaDeviceGetAttribute(&value, attr, device) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_AddTraceback("cudf._cuda.gpu.getDeviceAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* Python wrapper */ -static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_7getDeviceAttribute(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_4cudf_5_cuda_3gpu_6getDeviceAttribute[] = "getDeviceAttribute(attr, device) -> int"; -static PyObject *__pyx_pw_4cudf_5_cuda_3gpu_7getDeviceAttribute(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_attr = 0; - PyObject *__pyx_v_device = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("getDeviceAttribute (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_attr,&__pyx_n_s_device,0}; - PyObject* values[2] = {0,0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_attr)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_device)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("getDeviceAttribute", 1, 2, 2, 1); __PYX_ERR(0, 133, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "getDeviceAttribute") < 0)) 
__PYX_ERR(0, 133, __pyx_L3_error) - } - } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - } - __pyx_v_attr = values[0]; - __pyx_v_device = values[1]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("getDeviceAttribute", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 133, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("cudf._cuda.gpu.getDeviceAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_4cudf_5_cuda_3gpu_6getDeviceAttribute(__pyx_self, __pyx_v_attr, __pyx_v_device); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_4cudf_5_cuda_3gpu_6getDeviceAttribute(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_attr, PyObject *__pyx_v_device) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - __Pyx_RefNannySetupContext("getDeviceAttribute", 0); - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __pyx_f_4cudf_5_cuda_3gpu_getDeviceAttribute(__pyx_v_attr, __pyx_v_device, 0); if (unlikely(__pyx_t_1 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 133, __pyx_L1_error) - __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 133, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("cudf._cuda.gpu.getDeviceAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyMethodDef __pyx_methods[] = { - {"driverGetVersion", 
(PyCFunction)__pyx_pw_4cudf_5_cuda_3gpu_1driverGetVersion, METH_NOARGS, __pyx_doc_4cudf_5_cuda_3gpu_driverGetVersion}, - {"runtimeGetVersion", (PyCFunction)__pyx_pw_4cudf_5_cuda_3gpu_3runtimeGetVersion, METH_NOARGS, __pyx_doc_4cudf_5_cuda_3gpu_2runtimeGetVersion}, - {"getDeviceCount", (PyCFunction)__pyx_pw_4cudf_5_cuda_3gpu_5getDeviceCount, METH_NOARGS, __pyx_doc_4cudf_5_cuda_3gpu_4getDeviceCount}, - {"getDeviceAttribute", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4cudf_5_cuda_3gpu_7getDeviceAttribute, METH_VARARGS|METH_KEYWORDS, __pyx_doc_4cudf_5_cuda_3gpu_6getDeviceAttribute}, - {0, 0, 0, 0} -}; - -#if PY_MAJOR_VERSION >= 3 -#if CYTHON_PEP489_MULTI_PHASE_INIT -static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ -static int __pyx_pymod_exec_gpu(PyObject* module); /*proto*/ -static PyModuleDef_Slot __pyx_moduledef_slots[] = { - {Py_mod_create, (void*)__pyx_pymod_create}, - {Py_mod_exec, (void*)__pyx_pymod_exec_gpu}, - {0, NULL} -}; -#endif - -static struct PyModuleDef __pyx_moduledef = { - PyModuleDef_HEAD_INIT, - "gpu", - 0, /* m_doc */ - #if CYTHON_PEP489_MULTI_PHASE_INIT - 0, /* m_size */ - #else - -1, /* m_size */ - #endif - __pyx_methods /* m_methods */, - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_moduledef_slots, /* m_slots */ - #else - NULL, /* m_reload */ - #endif - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL /* m_free */ -}; -#endif -#ifndef CYTHON_SMALL_CODE -#if defined(__clang__) - #define CYTHON_SMALL_CODE -#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) - #define CYTHON_SMALL_CODE __attribute__((cold)) -#else - #define CYTHON_SMALL_CODE -#endif -#endif - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_n_s_CudaDeviceAttr, __pyx_k_CudaDeviceAttr, sizeof(__pyx_k_CudaDeviceAttr), 0, 0, 1, 1}, - {&__pyx_n_s_IntEnum, __pyx_k_IntEnum, sizeof(__pyx_k_IntEnum), 0, 0, 1, 1}, - {&__pyx_n_s_attr, __pyx_k_attr, sizeof(__pyx_k_attr), 0, 0, 1, 1}, - 
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrAsyncEngineCount, __pyx_k_cudaDevAttrAsyncEngineCount, sizeof(__pyx_k_cudaDevAttrAsyncEngineCount), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrCanFlushRemoteWrites, __pyx_k_cudaDevAttrCanFlushRemoteWrites, sizeof(__pyx_k_cudaDevAttrCanFlushRemoteWrites), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrCanMapHostMemory, __pyx_k_cudaDevAttrCanMapHostMemory, sizeof(__pyx_k_cudaDevAttrCanMapHostMemory), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrCanUseHostPointerForR, __pyx_k_cudaDevAttrCanUseHostPointerForR, sizeof(__pyx_k_cudaDevAttrCanUseHostPointerForR), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrClockRate, __pyx_k_cudaDevAttrClockRate, sizeof(__pyx_k_cudaDevAttrClockRate), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrComputeCapabilityMajo, __pyx_k_cudaDevAttrComputeCapabilityMajo, sizeof(__pyx_k_cudaDevAttrComputeCapabilityMajo), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrComputeCapabilityMino, __pyx_k_cudaDevAttrComputeCapabilityMino, sizeof(__pyx_k_cudaDevAttrComputeCapabilityMino), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrComputeMode, __pyx_k_cudaDevAttrComputeMode, sizeof(__pyx_k_cudaDevAttrComputeMode), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrComputePreemptionSupp, __pyx_k_cudaDevAttrComputePreemptionSupp, sizeof(__pyx_k_cudaDevAttrComputePreemptionSupp), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrConcurrentKernels, __pyx_k_cudaDevAttrConcurrentKernels, sizeof(__pyx_k_cudaDevAttrConcurrentKernels), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrConcurrentManagedAcce, __pyx_k_cudaDevAttrConcurrentManagedAcce, sizeof(__pyx_k_cudaDevAttrConcurrentManagedAcce), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrCooperativeLaunch, __pyx_k_cudaDevAttrCooperativeLaunch, sizeof(__pyx_k_cudaDevAttrCooperativeLaunch), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrCooperativeMultiDevic, __pyx_k_cudaDevAttrCooperativeMultiDevic, sizeof(__pyx_k_cudaDevAttrCooperativeMultiDevic), 0, 0, 1, 1}, - 
{&__pyx_n_s_cudaDevAttrDirectManagedMemAcces, __pyx_k_cudaDevAttrDirectManagedMemAcces, sizeof(__pyx_k_cudaDevAttrDirectManagedMemAcces), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrEccEnabled, __pyx_k_cudaDevAttrEccEnabled, sizeof(__pyx_k_cudaDevAttrEccEnabled), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrGlobalL1CacheSupporte, __pyx_k_cudaDevAttrGlobalL1CacheSupporte, sizeof(__pyx_k_cudaDevAttrGlobalL1CacheSupporte), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrGlobalMemoryBusWidth, __pyx_k_cudaDevAttrGlobalMemoryBusWidth, sizeof(__pyx_k_cudaDevAttrGlobalMemoryBusWidth), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrGpuOverlap, __pyx_k_cudaDevAttrGpuOverlap, sizeof(__pyx_k_cudaDevAttrGpuOverlap), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrHostNativeAtomicSuppo, __pyx_k_cudaDevAttrHostNativeAtomicSuppo, sizeof(__pyx_k_cudaDevAttrHostNativeAtomicSuppo), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrHostRegisterSupported, __pyx_k_cudaDevAttrHostRegisterSupported, sizeof(__pyx_k_cudaDevAttrHostRegisterSupported), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrIntegrated, __pyx_k_cudaDevAttrIntegrated, sizeof(__pyx_k_cudaDevAttrIntegrated), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrIsMultiGpuBoard, __pyx_k_cudaDevAttrIsMultiGpuBoard, sizeof(__pyx_k_cudaDevAttrIsMultiGpuBoard), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrKernelExecTimeout, __pyx_k_cudaDevAttrKernelExecTimeout, sizeof(__pyx_k_cudaDevAttrKernelExecTimeout), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrL2CacheSize, __pyx_k_cudaDevAttrL2CacheSize, sizeof(__pyx_k_cudaDevAttrL2CacheSize), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrLocalL1CacheSupported, __pyx_k_cudaDevAttrLocalL1CacheSupported, sizeof(__pyx_k_cudaDevAttrLocalL1CacheSupported), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrManagedMemory, __pyx_k_cudaDevAttrManagedMemory, sizeof(__pyx_k_cudaDevAttrManagedMemory), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxBlockDimX, __pyx_k_cudaDevAttrMaxBlockDimX, sizeof(__pyx_k_cudaDevAttrMaxBlockDimX), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxBlockDimY, __pyx_k_cudaDevAttrMaxBlockDimY, 
sizeof(__pyx_k_cudaDevAttrMaxBlockDimY), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxBlockDimZ, __pyx_k_cudaDevAttrMaxBlockDimZ, sizeof(__pyx_k_cudaDevAttrMaxBlockDimZ), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxGridDimX, __pyx_k_cudaDevAttrMaxGridDimX, sizeof(__pyx_k_cudaDevAttrMaxGridDimX), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxGridDimY, __pyx_k_cudaDevAttrMaxGridDimY, sizeof(__pyx_k_cudaDevAttrMaxGridDimY), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxGridDimZ, __pyx_k_cudaDevAttrMaxGridDimZ, sizeof(__pyx_k_cudaDevAttrMaxGridDimZ), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxPitch, __pyx_k_cudaDevAttrMaxPitch, sizeof(__pyx_k_cudaDevAttrMaxPitch), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxRegistersPerBlock, __pyx_k_cudaDevAttrMaxRegistersPerBlock, sizeof(__pyx_k_cudaDevAttrMaxRegistersPerBlock), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxRegistersPerMultip, __pyx_k_cudaDevAttrMaxRegistersPerMultip, sizeof(__pyx_k_cudaDevAttrMaxRegistersPerMultip), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxSharedMemoryPerBlo, __pyx_k_cudaDevAttrMaxSharedMemoryPerBlo, sizeof(__pyx_k_cudaDevAttrMaxSharedMemoryPerBlo), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxSharedMemoryPerBlo_2, __pyx_k_cudaDevAttrMaxSharedMemoryPerBlo_2, sizeof(__pyx_k_cudaDevAttrMaxSharedMemoryPerBlo_2), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxSharedMemoryPerMul, __pyx_k_cudaDevAttrMaxSharedMemoryPerMul, sizeof(__pyx_k_cudaDevAttrMaxSharedMemoryPerMul), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxSurface1DLayeredLa, __pyx_k_cudaDevAttrMaxSurface1DLayeredLa, sizeof(__pyx_k_cudaDevAttrMaxSurface1DLayeredLa), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxSurface1DLayeredWi, __pyx_k_cudaDevAttrMaxSurface1DLayeredWi, sizeof(__pyx_k_cudaDevAttrMaxSurface1DLayeredWi), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxSurface1DWidth, __pyx_k_cudaDevAttrMaxSurface1DWidth, sizeof(__pyx_k_cudaDevAttrMaxSurface1DWidth), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxSurface2DHeight, __pyx_k_cudaDevAttrMaxSurface2DHeight, 
sizeof(__pyx_k_cudaDevAttrMaxSurface2DHeight), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxSurface2DLayeredHe, __pyx_k_cudaDevAttrMaxSurface2DLayeredHe, sizeof(__pyx_k_cudaDevAttrMaxSurface2DLayeredHe), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxSurface2DLayeredLa, __pyx_k_cudaDevAttrMaxSurface2DLayeredLa, sizeof(__pyx_k_cudaDevAttrMaxSurface2DLayeredLa), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxSurface2DLayeredWi, __pyx_k_cudaDevAttrMaxSurface2DLayeredWi, sizeof(__pyx_k_cudaDevAttrMaxSurface2DLayeredWi), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxSurface2DWidth, __pyx_k_cudaDevAttrMaxSurface2DWidth, sizeof(__pyx_k_cudaDevAttrMaxSurface2DWidth), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxSurface3DDepth, __pyx_k_cudaDevAttrMaxSurface3DDepth, sizeof(__pyx_k_cudaDevAttrMaxSurface3DDepth), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxSurface3DHeight, __pyx_k_cudaDevAttrMaxSurface3DHeight, sizeof(__pyx_k_cudaDevAttrMaxSurface3DHeight), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxSurface3DWidth, __pyx_k_cudaDevAttrMaxSurface3DWidth, sizeof(__pyx_k_cudaDevAttrMaxSurface3DWidth), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxSurfaceCubemapLaye, __pyx_k_cudaDevAttrMaxSurfaceCubemapLaye, sizeof(__pyx_k_cudaDevAttrMaxSurfaceCubemapLaye), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxSurfaceCubemapLaye_2, __pyx_k_cudaDevAttrMaxSurfaceCubemapLaye_2, sizeof(__pyx_k_cudaDevAttrMaxSurfaceCubemapLaye_2), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxSurfaceCubemapWidt, __pyx_k_cudaDevAttrMaxSurfaceCubemapWidt, sizeof(__pyx_k_cudaDevAttrMaxSurfaceCubemapWidt), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture1DLayeredLa, __pyx_k_cudaDevAttrMaxTexture1DLayeredLa, sizeof(__pyx_k_cudaDevAttrMaxTexture1DLayeredLa), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture1DLayeredWi, __pyx_k_cudaDevAttrMaxTexture1DLayeredWi, sizeof(__pyx_k_cudaDevAttrMaxTexture1DLayeredWi), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture1DLinearWid, __pyx_k_cudaDevAttrMaxTexture1DLinearWid, sizeof(__pyx_k_cudaDevAttrMaxTexture1DLinearWid), 0, 
0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture1DMipmapped, __pyx_k_cudaDevAttrMaxTexture1DMipmapped, sizeof(__pyx_k_cudaDevAttrMaxTexture1DMipmapped), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture1DWidth, __pyx_k_cudaDevAttrMaxTexture1DWidth, sizeof(__pyx_k_cudaDevAttrMaxTexture1DWidth), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture2DGatherHei, __pyx_k_cudaDevAttrMaxTexture2DGatherHei, sizeof(__pyx_k_cudaDevAttrMaxTexture2DGatherHei), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture2DGatherWid, __pyx_k_cudaDevAttrMaxTexture2DGatherWid, sizeof(__pyx_k_cudaDevAttrMaxTexture2DGatherWid), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture2DHeight, __pyx_k_cudaDevAttrMaxTexture2DHeight, sizeof(__pyx_k_cudaDevAttrMaxTexture2DHeight), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture2DLayeredHe, __pyx_k_cudaDevAttrMaxTexture2DLayeredHe, sizeof(__pyx_k_cudaDevAttrMaxTexture2DLayeredHe), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture2DLayeredLa, __pyx_k_cudaDevAttrMaxTexture2DLayeredLa, sizeof(__pyx_k_cudaDevAttrMaxTexture2DLayeredLa), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture2DLayeredWi, __pyx_k_cudaDevAttrMaxTexture2DLayeredWi, sizeof(__pyx_k_cudaDevAttrMaxTexture2DLayeredWi), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture2DLinearHei, __pyx_k_cudaDevAttrMaxTexture2DLinearHei, sizeof(__pyx_k_cudaDevAttrMaxTexture2DLinearHei), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture2DLinearPit, __pyx_k_cudaDevAttrMaxTexture2DLinearPit, sizeof(__pyx_k_cudaDevAttrMaxTexture2DLinearPit), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture2DLinearWid, __pyx_k_cudaDevAttrMaxTexture2DLinearWid, sizeof(__pyx_k_cudaDevAttrMaxTexture2DLinearWid), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture2DMipmapped, __pyx_k_cudaDevAttrMaxTexture2DMipmapped, sizeof(__pyx_k_cudaDevAttrMaxTexture2DMipmapped), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture2DMipmapped_2, __pyx_k_cudaDevAttrMaxTexture2DMipmapped_2, sizeof(__pyx_k_cudaDevAttrMaxTexture2DMipmapped_2), 0, 0, 1, 1}, - 
{&__pyx_n_s_cudaDevAttrMaxTexture2DWidth, __pyx_k_cudaDevAttrMaxTexture2DWidth, sizeof(__pyx_k_cudaDevAttrMaxTexture2DWidth), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture3DDepth, __pyx_k_cudaDevAttrMaxTexture3DDepth, sizeof(__pyx_k_cudaDevAttrMaxTexture3DDepth), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture3DDepthAlt, __pyx_k_cudaDevAttrMaxTexture3DDepthAlt, sizeof(__pyx_k_cudaDevAttrMaxTexture3DDepthAlt), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture3DHeight, __pyx_k_cudaDevAttrMaxTexture3DHeight, sizeof(__pyx_k_cudaDevAttrMaxTexture3DHeight), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture3DHeightAlt, __pyx_k_cudaDevAttrMaxTexture3DHeightAlt, sizeof(__pyx_k_cudaDevAttrMaxTexture3DHeightAlt), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture3DWidth, __pyx_k_cudaDevAttrMaxTexture3DWidth, sizeof(__pyx_k_cudaDevAttrMaxTexture3DWidth), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTexture3DWidthAlt, __pyx_k_cudaDevAttrMaxTexture3DWidthAlt, sizeof(__pyx_k_cudaDevAttrMaxTexture3DWidthAlt), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTextureCubemapLaye, __pyx_k_cudaDevAttrMaxTextureCubemapLaye, sizeof(__pyx_k_cudaDevAttrMaxTextureCubemapLaye), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTextureCubemapLaye_2, __pyx_k_cudaDevAttrMaxTextureCubemapLaye_2, sizeof(__pyx_k_cudaDevAttrMaxTextureCubemapLaye_2), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxTextureCubemapWidt, __pyx_k_cudaDevAttrMaxTextureCubemapWidt, sizeof(__pyx_k_cudaDevAttrMaxTextureCubemapWidt), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxThreadsPerBlock, __pyx_k_cudaDevAttrMaxThreadsPerBlock, sizeof(__pyx_k_cudaDevAttrMaxThreadsPerBlock), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMaxThreadsPerMultiPro, __pyx_k_cudaDevAttrMaxThreadsPerMultiPro, sizeof(__pyx_k_cudaDevAttrMaxThreadsPerMultiPro), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMemoryClockRate, __pyx_k_cudaDevAttrMemoryClockRate, sizeof(__pyx_k_cudaDevAttrMemoryClockRate), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMultiGpuBoardGroupID, 
__pyx_k_cudaDevAttrMultiGpuBoardGroupID, sizeof(__pyx_k_cudaDevAttrMultiGpuBoardGroupID), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrMultiProcessorCount, __pyx_k_cudaDevAttrMultiProcessorCount, sizeof(__pyx_k_cudaDevAttrMultiProcessorCount), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrPageableMemoryAccess, __pyx_k_cudaDevAttrPageableMemoryAccess, sizeof(__pyx_k_cudaDevAttrPageableMemoryAccess), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrPageableMemoryAccessU, __pyx_k_cudaDevAttrPageableMemoryAccessU, sizeof(__pyx_k_cudaDevAttrPageableMemoryAccessU), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrPciBusId, __pyx_k_cudaDevAttrPciBusId, sizeof(__pyx_k_cudaDevAttrPciBusId), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrPciDeviceId, __pyx_k_cudaDevAttrPciDeviceId, sizeof(__pyx_k_cudaDevAttrPciDeviceId), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrPciDomainId, __pyx_k_cudaDevAttrPciDomainId, sizeof(__pyx_k_cudaDevAttrPciDomainId), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrReserved92, __pyx_k_cudaDevAttrReserved92, sizeof(__pyx_k_cudaDevAttrReserved92), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrReserved93, __pyx_k_cudaDevAttrReserved93, sizeof(__pyx_k_cudaDevAttrReserved93), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrReserved94, __pyx_k_cudaDevAttrReserved94, sizeof(__pyx_k_cudaDevAttrReserved94), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrSingleToDoublePrecisi, __pyx_k_cudaDevAttrSingleToDoublePrecisi, sizeof(__pyx_k_cudaDevAttrSingleToDoublePrecisi), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrStreamPrioritiesSuppo, __pyx_k_cudaDevAttrStreamPrioritiesSuppo, sizeof(__pyx_k_cudaDevAttrStreamPrioritiesSuppo), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrSurfaceAlignment, __pyx_k_cudaDevAttrSurfaceAlignment, sizeof(__pyx_k_cudaDevAttrSurfaceAlignment), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrTccDriver, __pyx_k_cudaDevAttrTccDriver, sizeof(__pyx_k_cudaDevAttrTccDriver), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrTextureAlignment, __pyx_k_cudaDevAttrTextureAlignment, sizeof(__pyx_k_cudaDevAttrTextureAlignment), 0, 0, 1, 1}, - 
{&__pyx_n_s_cudaDevAttrTexturePitchAlignment, __pyx_k_cudaDevAttrTexturePitchAlignment, sizeof(__pyx_k_cudaDevAttrTexturePitchAlignment), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrTotalConstantMemory, __pyx_k_cudaDevAttrTotalConstantMemory, sizeof(__pyx_k_cudaDevAttrTotalConstantMemory), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrUnifiedAddressing, __pyx_k_cudaDevAttrUnifiedAddressing, sizeof(__pyx_k_cudaDevAttrUnifiedAddressing), 0, 0, 1, 1}, - {&__pyx_n_s_cudaDevAttrWarpSize, __pyx_k_cudaDevAttrWarpSize, sizeof(__pyx_k_cudaDevAttrWarpSize), 0, 0, 1, 1}, - {&__pyx_n_s_cudf__cuda_gpu, __pyx_k_cudf__cuda_gpu, sizeof(__pyx_k_cudf__cuda_gpu), 0, 0, 1, 1}, - {&__pyx_n_s_device, __pyx_k_device, sizeof(__pyx_k_device), 0, 0, 1, 1}, - {&__pyx_n_s_doc, __pyx_k_doc, sizeof(__pyx_k_doc), 0, 0, 1, 1}, - {&__pyx_n_s_enum, __pyx_k_enum, sizeof(__pyx_k_enum), 0, 0, 1, 1}, - {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, - {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, - {&__pyx_n_s_metaclass, __pyx_k_metaclass, sizeof(__pyx_k_metaclass), 0, 0, 1, 1}, - {&__pyx_n_s_module, __pyx_k_module, sizeof(__pyx_k_module), 0, 0, 1, 1}, - {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, - {&__pyx_n_s_prepare, __pyx_k_prepare, sizeof(__pyx_k_prepare), 0, 0, 1, 1}, - {&__pyx_n_s_qualname, __pyx_k_qualname, sizeof(__pyx_k_qualname), 0, 0, 1, 1}, - {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, - {0, 0, 0, 0, 0, 0, 0} -}; -static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { - return 0; -} - -static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); - __Pyx_RefNannyFinishContext(); - return 0; -} - -static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { - if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - return 0; - __pyx_L1_error:; - return -1; -} - -static CYTHON_SMALL_CODE int 
__Pyx_modinit_global_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ - -static int __Pyx_modinit_global_init_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); - /*--- Global init code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_variable_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); - /*--- Variable export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); - /*--- Function export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_type_init_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); - /*--- Type init code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_type_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); - /*--- Type import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_variable_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); - /*--- Variable import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_import_code(void) { - 
__Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); - /*--- Function import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - - -#if PY_MAJOR_VERSION < 3 -#ifdef CYTHON_NO_PYINIT_EXPORT -#define __Pyx_PyMODINIT_FUNC void -#else -#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC -#endif -#else -#ifdef CYTHON_NO_PYINIT_EXPORT -#define __Pyx_PyMODINIT_FUNC PyObject * -#else -#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC -#endif -#endif - - -#if PY_MAJOR_VERSION < 3 -__Pyx_PyMODINIT_FUNC initgpu(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC initgpu(void) -#else -__Pyx_PyMODINIT_FUNC PyInit_gpu(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC PyInit_gpu(void) -#if CYTHON_PEP489_MULTI_PHASE_INIT -{ - return PyModuleDef_Init(&__pyx_moduledef); -} -static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { - #if PY_VERSION_HEX >= 0x030700A1 - static PY_INT64_T main_interpreter_id = -1; - PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); - if (main_interpreter_id == -1) { - main_interpreter_id = current_id; - return (unlikely(current_id == -1)) ? 
-1 : 0; - } else if (unlikely(main_interpreter_id != current_id)) - #else - static PyInterpreterState *main_interpreter = NULL; - PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; - if (!main_interpreter) { - main_interpreter = current_interpreter; - } else if (unlikely(main_interpreter != current_interpreter)) - #endif - { - PyErr_SetString( - PyExc_ImportError, - "Interpreter change detected - this module can only be loaded into one interpreter per process."); - return -1; - } - return 0; -} -static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { - PyObject *value = PyObject_GetAttrString(spec, from_name); - int result = 0; - if (likely(value)) { - if (allow_none || value != Py_None) { - result = PyDict_SetItemString(moddict, to_name, value); - } - Py_DECREF(value); - } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Clear(); - } else { - result = -1; - } - return result; -} -static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { - PyObject *module = NULL, *moddict, *modname; - if (__Pyx_check_single_interpreter()) - return NULL; - if (__pyx_m) - return __Pyx_NewRef(__pyx_m); - modname = PyObject_GetAttrString(spec, "name"); - if (unlikely(!modname)) goto bad; - module = PyModule_NewObject(modname); - Py_DECREF(modname); - if (unlikely(!module)) goto bad; - moddict = PyModule_GetDict(module); - if (unlikely(!moddict)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; - return module; -bad: - Py_XDECREF(module); - 
return NULL; -} - - -static CYTHON_SMALL_CODE int __pyx_pymod_exec_gpu(PyObject *__pyx_pyinit_module) -#endif -#endif -{ - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - __Pyx_RefNannyDeclarations - #if CYTHON_PEP489_MULTI_PHASE_INIT - if (__pyx_m) { - if (__pyx_m == __pyx_pyinit_module) return 0; - PyErr_SetString(PyExc_RuntimeError, "Module 'gpu' has already been imported. Re-initialisation is not supported."); - return -1; - } - #elif PY_MAJOR_VERSION >= 3 - if (__pyx_m) return __Pyx_NewRef(__pyx_m); - #endif - #if CYTHON_REFNANNY -__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); -if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); -} -#endif - __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_gpu(void)", 0); - if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pxy_PyFrame_Initialize_Offsets - __Pxy_PyFrame_Initialize_Offsets(); - #endif - __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pyx_CyFunction_USED - if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_FusedFunction_USED - if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Coroutine_USED - if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Generator_USED - if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_AsyncGen_USED - if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef 
__Pyx_StopAsyncIteration_USED - if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - /*--- Library function declarations ---*/ - /*--- Threads initialization code ---*/ - #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - #ifdef WITH_THREAD /* Python build with threading support? */ - PyEval_InitThreads(); - #endif - #endif - /*--- Module creation code ---*/ - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_m = __pyx_pyinit_module; - Py_INCREF(__pyx_m); - #else - #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4("gpu", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - #endif - if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_d); - __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_b); - __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_cython_runtime); - if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - /*--- Initialize various global constants etc. 
---*/ - if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) - if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - if (__pyx_module_is_main_cudf___cuda__gpu) { - if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - } - #if PY_MAJOR_VERSION >= 3 - { - PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) - if (!PyDict_GetItemString(modules, "cudf._cuda.gpu")) { - if (unlikely(PyDict_SetItemString(modules, "cudf._cuda.gpu", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) - } - } - #endif - /*--- Builtin init code ---*/ - if (__Pyx_InitCachedBuiltins() < 0) goto __pyx_L1_error; - /*--- Constants init code ---*/ - if (__Pyx_InitCachedConstants() < 0) goto __pyx_L1_error; - /*--- Global type/function init code ---*/ - (void)__Pyx_modinit_global_init_code(); - (void)__Pyx_modinit_variable_export_code(); - (void)__Pyx_modinit_function_export_code(); - (void)__Pyx_modinit_type_init_code(); - (void)__Pyx_modinit_type_import_code(); - (void)__Pyx_modinit_variable_import_code(); - (void)__Pyx_modinit_function_import_code(); - /*--- Execution code ---*/ - #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) - if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - - /* "cudf/_cuda/gpu.pyx":10 - * cudaDeviceAttr - * ) - * from enum import IntEnum # <<<<<<<<<<<<<< - * from cudf._cuda.gpu cimport underlying_type_attribute - * - */ - __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_n_s_IntEnum); - __Pyx_GIVEREF(__pyx_n_s_IntEnum); - PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_IntEnum); - __pyx_t_2 = __Pyx_Import(__pyx_n_s_enum, __pyx_t_1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_IntEnum); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_IntEnum, __pyx_t_1) < 0) __PYX_ERR(0, 10, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "cudf/_cuda/gpu.pyx":14 - * - * - * class CudaDeviceAttr(IntEnum): # <<<<<<<<<<<<<< - * cudaDevAttrMaxThreadsPerBlock = cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock - * cudaDevAttrMaxBlockDimX = cudaDeviceAttr.cudaDevAttrMaxBlockDimX - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_IntEnum); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_CalculateMetaclass(NULL, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_Py3MetaclassPrepare(__pyx_t_2, __pyx_t_1, __pyx_n_s_CudaDeviceAttr, __pyx_n_s_CudaDeviceAttr, (PyObject *) NULL, __pyx_n_s_cudf__cuda_gpu, (PyObject *) NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - - /* "cudf/_cuda/gpu.pyx":15 - * - * class CudaDeviceAttr(IntEnum): - * cudaDevAttrMaxThreadsPerBlock = cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock # <<<<<<<<<<<<<< - * cudaDevAttrMaxBlockDimX = cudaDeviceAttr.cudaDevAttrMaxBlockDimX - * cudaDevAttrMaxBlockDimY = cudaDeviceAttr.cudaDevAttrMaxBlockDimY - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxThreadsPerBlock)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxThreadsPerBlock, 
__pyx_t_4) < 0) __PYX_ERR(0, 15, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":16 - * class CudaDeviceAttr(IntEnum): - * cudaDevAttrMaxThreadsPerBlock = cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock - * cudaDevAttrMaxBlockDimX = cudaDeviceAttr.cudaDevAttrMaxBlockDimX # <<<<<<<<<<<<<< - * cudaDevAttrMaxBlockDimY = cudaDeviceAttr.cudaDevAttrMaxBlockDimY - * cudaDevAttrMaxBlockDimZ = cudaDeviceAttr.cudaDevAttrMaxBlockDimZ - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxBlockDimX)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxBlockDimX, __pyx_t_4) < 0) __PYX_ERR(0, 16, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":17 - * cudaDevAttrMaxThreadsPerBlock = cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock - * cudaDevAttrMaxBlockDimX = cudaDeviceAttr.cudaDevAttrMaxBlockDimX - * cudaDevAttrMaxBlockDimY = cudaDeviceAttr.cudaDevAttrMaxBlockDimY # <<<<<<<<<<<<<< - * cudaDevAttrMaxBlockDimZ = cudaDeviceAttr.cudaDevAttrMaxBlockDimZ - * cudaDevAttrMaxGridDimX = cudaDeviceAttr.cudaDevAttrMaxGridDimX - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxBlockDimY)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxBlockDimY, __pyx_t_4) < 0) __PYX_ERR(0, 17, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":18 - * cudaDevAttrMaxBlockDimX = cudaDeviceAttr.cudaDevAttrMaxBlockDimX - * cudaDevAttrMaxBlockDimY = cudaDeviceAttr.cudaDevAttrMaxBlockDimY - * cudaDevAttrMaxBlockDimZ = cudaDeviceAttr.cudaDevAttrMaxBlockDimZ # <<<<<<<<<<<<<< - * cudaDevAttrMaxGridDimX = cudaDeviceAttr.cudaDevAttrMaxGridDimX - * cudaDevAttrMaxGridDimY = cudaDeviceAttr.cudaDevAttrMaxGridDimY - 
*/ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxBlockDimZ)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxBlockDimZ, __pyx_t_4) < 0) __PYX_ERR(0, 18, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":19 - * cudaDevAttrMaxBlockDimY = cudaDeviceAttr.cudaDevAttrMaxBlockDimY - * cudaDevAttrMaxBlockDimZ = cudaDeviceAttr.cudaDevAttrMaxBlockDimZ - * cudaDevAttrMaxGridDimX = cudaDeviceAttr.cudaDevAttrMaxGridDimX # <<<<<<<<<<<<<< - * cudaDevAttrMaxGridDimY = cudaDeviceAttr.cudaDevAttrMaxGridDimY - * cudaDevAttrMaxGridDimZ = cudaDeviceAttr.cudaDevAttrMaxGridDimZ - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxGridDimX)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxGridDimX, __pyx_t_4) < 0) __PYX_ERR(0, 19, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":20 - * cudaDevAttrMaxBlockDimZ = cudaDeviceAttr.cudaDevAttrMaxBlockDimZ - * cudaDevAttrMaxGridDimX = cudaDeviceAttr.cudaDevAttrMaxGridDimX - * cudaDevAttrMaxGridDimY = cudaDeviceAttr.cudaDevAttrMaxGridDimY # <<<<<<<<<<<<<< - * cudaDevAttrMaxGridDimZ = cudaDeviceAttr.cudaDevAttrMaxGridDimZ - * cudaDevAttrMaxSharedMemoryPerBlock = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlock - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxGridDimY)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxGridDimY, __pyx_t_4) < 0) __PYX_ERR(0, 20, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":21 - * cudaDevAttrMaxGridDimX = 
cudaDeviceAttr.cudaDevAttrMaxGridDimX - * cudaDevAttrMaxGridDimY = cudaDeviceAttr.cudaDevAttrMaxGridDimY - * cudaDevAttrMaxGridDimZ = cudaDeviceAttr.cudaDevAttrMaxGridDimZ # <<<<<<<<<<<<<< - * cudaDevAttrMaxSharedMemoryPerBlock = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlock - * cudaDevAttrTotalConstantMemory = cudaDeviceAttr.cudaDevAttrTotalConstantMemory - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxGridDimZ)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxGridDimZ, __pyx_t_4) < 0) __PYX_ERR(0, 21, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":22 - * cudaDevAttrMaxGridDimY = cudaDeviceAttr.cudaDevAttrMaxGridDimY - * cudaDevAttrMaxGridDimZ = cudaDeviceAttr.cudaDevAttrMaxGridDimZ - * cudaDevAttrMaxSharedMemoryPerBlock = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlock # <<<<<<<<<<<<<< - * cudaDevAttrTotalConstantMemory = cudaDeviceAttr.cudaDevAttrTotalConstantMemory - * cudaDevAttrWarpSize = cudaDeviceAttr.cudaDevAttrWarpSize - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSharedMemoryPerBlock)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSharedMemoryPerBlo, __pyx_t_4) < 0) __PYX_ERR(0, 22, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":23 - * cudaDevAttrMaxGridDimZ = cudaDeviceAttr.cudaDevAttrMaxGridDimZ - * cudaDevAttrMaxSharedMemoryPerBlock = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlock - * cudaDevAttrTotalConstantMemory = cudaDeviceAttr.cudaDevAttrTotalConstantMemory # <<<<<<<<<<<<<< - * cudaDevAttrWarpSize = cudaDeviceAttr.cudaDevAttrWarpSize - * cudaDevAttrMaxPitch = cudaDeviceAttr.cudaDevAttrMaxPitch - */ - __pyx_t_4 = 
__Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrTotalConstantMemory)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrTotalConstantMemory, __pyx_t_4) < 0) __PYX_ERR(0, 23, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":24 - * cudaDevAttrMaxSharedMemoryPerBlock = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlock - * cudaDevAttrTotalConstantMemory = cudaDeviceAttr.cudaDevAttrTotalConstantMemory - * cudaDevAttrWarpSize = cudaDeviceAttr.cudaDevAttrWarpSize # <<<<<<<<<<<<<< - * cudaDevAttrMaxPitch = cudaDeviceAttr.cudaDevAttrMaxPitch - * cudaDevAttrMaxRegistersPerBlock = cudaDeviceAttr.cudaDevAttrMaxRegistersPerBlock - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrWarpSize)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrWarpSize, __pyx_t_4) < 0) __PYX_ERR(0, 24, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":25 - * cudaDevAttrTotalConstantMemory = cudaDeviceAttr.cudaDevAttrTotalConstantMemory - * cudaDevAttrWarpSize = cudaDeviceAttr.cudaDevAttrWarpSize - * cudaDevAttrMaxPitch = cudaDeviceAttr.cudaDevAttrMaxPitch # <<<<<<<<<<<<<< - * cudaDevAttrMaxRegistersPerBlock = cudaDeviceAttr.cudaDevAttrMaxRegistersPerBlock - * cudaDevAttrClockRate = cudaDeviceAttr.cudaDevAttrClockRate - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxPitch)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxPitch, __pyx_t_4) < 0) __PYX_ERR(0, 25, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":26 - * cudaDevAttrWarpSize = 
cudaDeviceAttr.cudaDevAttrWarpSize - * cudaDevAttrMaxPitch = cudaDeviceAttr.cudaDevAttrMaxPitch - * cudaDevAttrMaxRegistersPerBlock = cudaDeviceAttr.cudaDevAttrMaxRegistersPerBlock # <<<<<<<<<<<<<< - * cudaDevAttrClockRate = cudaDeviceAttr.cudaDevAttrClockRate - * cudaDevAttrTextureAlignment = cudaDeviceAttr.cudaDevAttrTextureAlignment - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxRegistersPerBlock)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxRegistersPerBlock, __pyx_t_4) < 0) __PYX_ERR(0, 26, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":27 - * cudaDevAttrMaxPitch = cudaDeviceAttr.cudaDevAttrMaxPitch - * cudaDevAttrMaxRegistersPerBlock = cudaDeviceAttr.cudaDevAttrMaxRegistersPerBlock - * cudaDevAttrClockRate = cudaDeviceAttr.cudaDevAttrClockRate # <<<<<<<<<<<<<< - * cudaDevAttrTextureAlignment = cudaDeviceAttr.cudaDevAttrTextureAlignment - * cudaDevAttrGpuOverlap = cudaDeviceAttr.cudaDevAttrGpuOverlap - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrClockRate)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 27, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrClockRate, __pyx_t_4) < 0) __PYX_ERR(0, 27, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":28 - * cudaDevAttrMaxRegistersPerBlock = cudaDeviceAttr.cudaDevAttrMaxRegistersPerBlock - * cudaDevAttrClockRate = cudaDeviceAttr.cudaDevAttrClockRate - * cudaDevAttrTextureAlignment = cudaDeviceAttr.cudaDevAttrTextureAlignment # <<<<<<<<<<<<<< - * cudaDevAttrGpuOverlap = cudaDeviceAttr.cudaDevAttrGpuOverlap - * cudaDevAttrMultiProcessorCount = cudaDeviceAttr.cudaDevAttrMultiProcessorCount - */ - __pyx_t_4 = 
__Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrTextureAlignment)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrTextureAlignment, __pyx_t_4) < 0) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":29 - * cudaDevAttrClockRate = cudaDeviceAttr.cudaDevAttrClockRate - * cudaDevAttrTextureAlignment = cudaDeviceAttr.cudaDevAttrTextureAlignment - * cudaDevAttrGpuOverlap = cudaDeviceAttr.cudaDevAttrGpuOverlap # <<<<<<<<<<<<<< - * cudaDevAttrMultiProcessorCount = cudaDeviceAttr.cudaDevAttrMultiProcessorCount - * cudaDevAttrKernelExecTimeout = cudaDeviceAttr.cudaDevAttrKernelExecTimeout - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrGpuOverlap)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrGpuOverlap, __pyx_t_4) < 0) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":30 - * cudaDevAttrTextureAlignment = cudaDeviceAttr.cudaDevAttrTextureAlignment - * cudaDevAttrGpuOverlap = cudaDeviceAttr.cudaDevAttrGpuOverlap - * cudaDevAttrMultiProcessorCount = cudaDeviceAttr.cudaDevAttrMultiProcessorCount # <<<<<<<<<<<<<< - * cudaDevAttrKernelExecTimeout = cudaDeviceAttr.cudaDevAttrKernelExecTimeout - * cudaDevAttrIntegrated = cudaDeviceAttr.cudaDevAttrIntegrated - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMultiProcessorCount)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 30, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMultiProcessorCount, __pyx_t_4) < 0) __PYX_ERR(0, 30, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":31 - * 
cudaDevAttrGpuOverlap = cudaDeviceAttr.cudaDevAttrGpuOverlap - * cudaDevAttrMultiProcessorCount = cudaDeviceAttr.cudaDevAttrMultiProcessorCount - * cudaDevAttrKernelExecTimeout = cudaDeviceAttr.cudaDevAttrKernelExecTimeout # <<<<<<<<<<<<<< - * cudaDevAttrIntegrated = cudaDeviceAttr.cudaDevAttrIntegrated - * cudaDevAttrCanMapHostMemory = cudaDeviceAttr.cudaDevAttrCanMapHostMemory - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrKernelExecTimeout)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 31, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrKernelExecTimeout, __pyx_t_4) < 0) __PYX_ERR(0, 31, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":32 - * cudaDevAttrMultiProcessorCount = cudaDeviceAttr.cudaDevAttrMultiProcessorCount - * cudaDevAttrKernelExecTimeout = cudaDeviceAttr.cudaDevAttrKernelExecTimeout - * cudaDevAttrIntegrated = cudaDeviceAttr.cudaDevAttrIntegrated # <<<<<<<<<<<<<< - * cudaDevAttrCanMapHostMemory = cudaDeviceAttr.cudaDevAttrCanMapHostMemory - * cudaDevAttrComputeMode = cudaDeviceAttr.cudaDevAttrComputeMode - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrIntegrated)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 32, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrIntegrated, __pyx_t_4) < 0) __PYX_ERR(0, 32, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":33 - * cudaDevAttrKernelExecTimeout = cudaDeviceAttr.cudaDevAttrKernelExecTimeout - * cudaDevAttrIntegrated = cudaDeviceAttr.cudaDevAttrIntegrated - * cudaDevAttrCanMapHostMemory = cudaDeviceAttr.cudaDevAttrCanMapHostMemory # <<<<<<<<<<<<<< - * cudaDevAttrComputeMode = cudaDeviceAttr.cudaDevAttrComputeMode - * cudaDevAttrMaxTexture1DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DWidth - */ - __pyx_t_4 = 
__Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrCanMapHostMemory)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 33, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrCanMapHostMemory, __pyx_t_4) < 0) __PYX_ERR(0, 33, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":34 - * cudaDevAttrIntegrated = cudaDeviceAttr.cudaDevAttrIntegrated - * cudaDevAttrCanMapHostMemory = cudaDeviceAttr.cudaDevAttrCanMapHostMemory - * cudaDevAttrComputeMode = cudaDeviceAttr.cudaDevAttrComputeMode # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture1DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DWidth - * cudaDevAttrMaxTexture2DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DWidth - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrComputeMode)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 34, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrComputeMode, __pyx_t_4) < 0) __PYX_ERR(0, 34, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":35 - * cudaDevAttrCanMapHostMemory = cudaDeviceAttr.cudaDevAttrCanMapHostMemory - * cudaDevAttrComputeMode = cudaDeviceAttr.cudaDevAttrComputeMode - * cudaDevAttrMaxTexture1DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DWidth # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture2DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DWidth - * cudaDevAttrMaxTexture2DHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DHeight - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture1DWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 35, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture1DWidth, __pyx_t_4) < 0) __PYX_ERR(0, 35, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":36 - * 
cudaDevAttrComputeMode = cudaDeviceAttr.cudaDevAttrComputeMode - * cudaDevAttrMaxTexture1DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DWidth - * cudaDevAttrMaxTexture2DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DWidth # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture2DHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DHeight - * cudaDevAttrMaxTexture3DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture3DWidth - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 36, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DWidth, __pyx_t_4) < 0) __PYX_ERR(0, 36, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":37 - * cudaDevAttrMaxTexture1DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DWidth - * cudaDevAttrMaxTexture2DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DWidth - * cudaDevAttrMaxTexture2DHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DHeight # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture3DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture3DWidth - * cudaDevAttrMaxTexture3DHeight = cudaDeviceAttr.cudaDevAttrMaxTexture3DHeight - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DHeight)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 37, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DHeight, __pyx_t_4) < 0) __PYX_ERR(0, 37, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":38 - * cudaDevAttrMaxTexture2DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DWidth - * cudaDevAttrMaxTexture2DHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DHeight - * cudaDevAttrMaxTexture3DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture3DWidth # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture3DHeight = cudaDeviceAttr.cudaDevAttrMaxTexture3DHeight - * 
cudaDevAttrMaxTexture3DDepth = cudaDeviceAttr.cudaDevAttrMaxTexture3DDepth - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture3DWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 38, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture3DWidth, __pyx_t_4) < 0) __PYX_ERR(0, 38, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":39 - * cudaDevAttrMaxTexture2DHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DHeight - * cudaDevAttrMaxTexture3DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture3DWidth - * cudaDevAttrMaxTexture3DHeight = cudaDeviceAttr.cudaDevAttrMaxTexture3DHeight # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture3DDepth = cudaDeviceAttr.cudaDevAttrMaxTexture3DDepth - * cudaDevAttrMaxTexture2DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredWidth - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture3DHeight)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 39, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture3DHeight, __pyx_t_4) < 0) __PYX_ERR(0, 39, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":40 - * cudaDevAttrMaxTexture3DWidth = cudaDeviceAttr.cudaDevAttrMaxTexture3DWidth - * cudaDevAttrMaxTexture3DHeight = cudaDeviceAttr.cudaDevAttrMaxTexture3DHeight - * cudaDevAttrMaxTexture3DDepth = cudaDeviceAttr.cudaDevAttrMaxTexture3DDepth # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture2DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredWidth - * cudaDevAttrMaxTexture2DLayeredHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredHeight - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture3DDepth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 40, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if 
(__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture3DDepth, __pyx_t_4) < 0) __PYX_ERR(0, 40, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":41 - * cudaDevAttrMaxTexture3DHeight = cudaDeviceAttr.cudaDevAttrMaxTexture3DHeight - * cudaDevAttrMaxTexture3DDepth = cudaDeviceAttr.cudaDevAttrMaxTexture3DDepth - * cudaDevAttrMaxTexture2DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredWidth # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture2DLayeredHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredHeight - * cudaDevAttrMaxTexture2DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredLayers - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DLayeredWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 41, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DLayeredWi, __pyx_t_4) < 0) __PYX_ERR(0, 41, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":42 - * cudaDevAttrMaxTexture3DDepth = cudaDeviceAttr.cudaDevAttrMaxTexture3DDepth - * cudaDevAttrMaxTexture2DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredWidth - * cudaDevAttrMaxTexture2DLayeredHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredHeight # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture2DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredLayers - * cudaDevAttrSurfaceAlignment = cudaDeviceAttr.cudaDevAttrSurfaceAlignment - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DLayeredHeight)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 42, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DLayeredHe, __pyx_t_4) < 0) __PYX_ERR(0, 42, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":43 - * cudaDevAttrMaxTexture2DLayeredWidth = 
cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredWidth - * cudaDevAttrMaxTexture2DLayeredHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredHeight - * cudaDevAttrMaxTexture2DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredLayers # <<<<<<<<<<<<<< - * cudaDevAttrSurfaceAlignment = cudaDeviceAttr.cudaDevAttrSurfaceAlignment - * cudaDevAttrConcurrentKernels = cudaDeviceAttr.cudaDevAttrConcurrentKernels - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DLayeredLayers)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DLayeredLa, __pyx_t_4) < 0) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":44 - * cudaDevAttrMaxTexture2DLayeredHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredHeight - * cudaDevAttrMaxTexture2DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredLayers - * cudaDevAttrSurfaceAlignment = cudaDeviceAttr.cudaDevAttrSurfaceAlignment # <<<<<<<<<<<<<< - * cudaDevAttrConcurrentKernels = cudaDeviceAttr.cudaDevAttrConcurrentKernels - * cudaDevAttrEccEnabled = cudaDeviceAttr.cudaDevAttrEccEnabled - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrSurfaceAlignment)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 44, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrSurfaceAlignment, __pyx_t_4) < 0) __PYX_ERR(0, 44, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":45 - * cudaDevAttrMaxTexture2DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredLayers - * cudaDevAttrSurfaceAlignment = cudaDeviceAttr.cudaDevAttrSurfaceAlignment - * cudaDevAttrConcurrentKernels = cudaDeviceAttr.cudaDevAttrConcurrentKernels # <<<<<<<<<<<<<< - * cudaDevAttrEccEnabled = 
cudaDeviceAttr.cudaDevAttrEccEnabled - * cudaDevAttrPciBusId = cudaDeviceAttr.cudaDevAttrPciBusId - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrConcurrentKernels)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 45, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrConcurrentKernels, __pyx_t_4) < 0) __PYX_ERR(0, 45, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":46 - * cudaDevAttrSurfaceAlignment = cudaDeviceAttr.cudaDevAttrSurfaceAlignment - * cudaDevAttrConcurrentKernels = cudaDeviceAttr.cudaDevAttrConcurrentKernels - * cudaDevAttrEccEnabled = cudaDeviceAttr.cudaDevAttrEccEnabled # <<<<<<<<<<<<<< - * cudaDevAttrPciBusId = cudaDeviceAttr.cudaDevAttrPciBusId - * cudaDevAttrPciDeviceId = cudaDeviceAttr.cudaDevAttrPciDeviceId - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrEccEnabled)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 46, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrEccEnabled, __pyx_t_4) < 0) __PYX_ERR(0, 46, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":47 - * cudaDevAttrConcurrentKernels = cudaDeviceAttr.cudaDevAttrConcurrentKernels - * cudaDevAttrEccEnabled = cudaDeviceAttr.cudaDevAttrEccEnabled - * cudaDevAttrPciBusId = cudaDeviceAttr.cudaDevAttrPciBusId # <<<<<<<<<<<<<< - * cudaDevAttrPciDeviceId = cudaDeviceAttr.cudaDevAttrPciDeviceId - * cudaDevAttrTccDriver = cudaDeviceAttr.cudaDevAttrTccDriver - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrPciBusId)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 47, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrPciBusId, __pyx_t_4) < 0) __PYX_ERR(0, 47, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* 
"cudf/_cuda/gpu.pyx":48 - * cudaDevAttrEccEnabled = cudaDeviceAttr.cudaDevAttrEccEnabled - * cudaDevAttrPciBusId = cudaDeviceAttr.cudaDevAttrPciBusId - * cudaDevAttrPciDeviceId = cudaDeviceAttr.cudaDevAttrPciDeviceId # <<<<<<<<<<<<<< - * cudaDevAttrTccDriver = cudaDeviceAttr.cudaDevAttrTccDriver - * cudaDevAttrMemoryClockRate = cudaDeviceAttr.cudaDevAttrMemoryClockRate - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrPciDeviceId)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 48, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrPciDeviceId, __pyx_t_4) < 0) __PYX_ERR(0, 48, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":49 - * cudaDevAttrPciBusId = cudaDeviceAttr.cudaDevAttrPciBusId - * cudaDevAttrPciDeviceId = cudaDeviceAttr.cudaDevAttrPciDeviceId - * cudaDevAttrTccDriver = cudaDeviceAttr.cudaDevAttrTccDriver # <<<<<<<<<<<<<< - * cudaDevAttrMemoryClockRate = cudaDeviceAttr.cudaDevAttrMemoryClockRate - * cudaDevAttrGlobalMemoryBusWidth = cudaDeviceAttr.cudaDevAttrGlobalMemoryBusWidth - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrTccDriver)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 49, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrTccDriver, __pyx_t_4) < 0) __PYX_ERR(0, 49, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":50 - * cudaDevAttrPciDeviceId = cudaDeviceAttr.cudaDevAttrPciDeviceId - * cudaDevAttrTccDriver = cudaDeviceAttr.cudaDevAttrTccDriver - * cudaDevAttrMemoryClockRate = cudaDeviceAttr.cudaDevAttrMemoryClockRate # <<<<<<<<<<<<<< - * cudaDevAttrGlobalMemoryBusWidth = cudaDeviceAttr.cudaDevAttrGlobalMemoryBusWidth - * cudaDevAttrL2CacheSize = cudaDeviceAttr.cudaDevAttrL2CacheSize - */ - __pyx_t_4 = 
__Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMemoryClockRate)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 50, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMemoryClockRate, __pyx_t_4) < 0) __PYX_ERR(0, 50, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":51 - * cudaDevAttrTccDriver = cudaDeviceAttr.cudaDevAttrTccDriver - * cudaDevAttrMemoryClockRate = cudaDeviceAttr.cudaDevAttrMemoryClockRate - * cudaDevAttrGlobalMemoryBusWidth = cudaDeviceAttr.cudaDevAttrGlobalMemoryBusWidth # <<<<<<<<<<<<<< - * cudaDevAttrL2CacheSize = cudaDeviceAttr.cudaDevAttrL2CacheSize - * cudaDevAttrMaxThreadsPerMultiProcessor = cudaDeviceAttr.cudaDevAttrMaxThreadsPerMultiProcessor - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrGlobalMemoryBusWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 51, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrGlobalMemoryBusWidth, __pyx_t_4) < 0) __PYX_ERR(0, 51, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":52 - * cudaDevAttrMemoryClockRate = cudaDeviceAttr.cudaDevAttrMemoryClockRate - * cudaDevAttrGlobalMemoryBusWidth = cudaDeviceAttr.cudaDevAttrGlobalMemoryBusWidth - * cudaDevAttrL2CacheSize = cudaDeviceAttr.cudaDevAttrL2CacheSize # <<<<<<<<<<<<<< - * cudaDevAttrMaxThreadsPerMultiProcessor = cudaDeviceAttr.cudaDevAttrMaxThreadsPerMultiProcessor - * cudaDevAttrAsyncEngineCount = cudaDeviceAttr.cudaDevAttrAsyncEngineCount - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrL2CacheSize)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 52, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrL2CacheSize, __pyx_t_4) < 0) __PYX_ERR(0, 52, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 
0; - - /* "cudf/_cuda/gpu.pyx":53 - * cudaDevAttrGlobalMemoryBusWidth = cudaDeviceAttr.cudaDevAttrGlobalMemoryBusWidth - * cudaDevAttrL2CacheSize = cudaDeviceAttr.cudaDevAttrL2CacheSize - * cudaDevAttrMaxThreadsPerMultiProcessor = cudaDeviceAttr.cudaDevAttrMaxThreadsPerMultiProcessor # <<<<<<<<<<<<<< - * cudaDevAttrAsyncEngineCount = cudaDeviceAttr.cudaDevAttrAsyncEngineCount - * cudaDevAttrUnifiedAddressing = cudaDeviceAttr.cudaDevAttrUnifiedAddressing - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxThreadsPerMultiProcessor)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 53, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxThreadsPerMultiPro, __pyx_t_4) < 0) __PYX_ERR(0, 53, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":54 - * cudaDevAttrL2CacheSize = cudaDeviceAttr.cudaDevAttrL2CacheSize - * cudaDevAttrMaxThreadsPerMultiProcessor = cudaDeviceAttr.cudaDevAttrMaxThreadsPerMultiProcessor - * cudaDevAttrAsyncEngineCount = cudaDeviceAttr.cudaDevAttrAsyncEngineCount # <<<<<<<<<<<<<< - * cudaDevAttrUnifiedAddressing = cudaDeviceAttr.cudaDevAttrUnifiedAddressing - * cudaDevAttrMaxTexture1DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredWidth - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrAsyncEngineCount)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 54, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrAsyncEngineCount, __pyx_t_4) < 0) __PYX_ERR(0, 54, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":55 - * cudaDevAttrMaxThreadsPerMultiProcessor = cudaDeviceAttr.cudaDevAttrMaxThreadsPerMultiProcessor - * cudaDevAttrAsyncEngineCount = cudaDeviceAttr.cudaDevAttrAsyncEngineCount - * cudaDevAttrUnifiedAddressing = cudaDeviceAttr.cudaDevAttrUnifiedAddressing # <<<<<<<<<<<<<< - 
* cudaDevAttrMaxTexture1DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredWidth - * cudaDevAttrMaxTexture1DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredLayers - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrUnifiedAddressing)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 55, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrUnifiedAddressing, __pyx_t_4) < 0) __PYX_ERR(0, 55, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":56 - * cudaDevAttrAsyncEngineCount = cudaDeviceAttr.cudaDevAttrAsyncEngineCount - * cudaDevAttrUnifiedAddressing = cudaDeviceAttr.cudaDevAttrUnifiedAddressing - * cudaDevAttrMaxTexture1DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredWidth # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture1DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredLayers - * cudaDevAttrMaxTexture2DGatherWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherWidth - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture1DLayeredWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 56, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture1DLayeredWi, __pyx_t_4) < 0) __PYX_ERR(0, 56, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":57 - * cudaDevAttrUnifiedAddressing = cudaDeviceAttr.cudaDevAttrUnifiedAddressing - * cudaDevAttrMaxTexture1DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredWidth - * cudaDevAttrMaxTexture1DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredLayers # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture2DGatherWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherWidth - * cudaDevAttrMaxTexture2DGatherHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherHeight - */ - __pyx_t_4 = 
__Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture1DLayeredLayers)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 57, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture1DLayeredLa, __pyx_t_4) < 0) __PYX_ERR(0, 57, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":58 - * cudaDevAttrMaxTexture1DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredWidth - * cudaDevAttrMaxTexture1DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredLayers - * cudaDevAttrMaxTexture2DGatherWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherWidth # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture2DGatherHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherHeight - * cudaDevAttrMaxTexture3DWidthAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DWidthAlt - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DGatherWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 58, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DGatherWid, __pyx_t_4) < 0) __PYX_ERR(0, 58, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":59 - * cudaDevAttrMaxTexture1DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredLayers - * cudaDevAttrMaxTexture2DGatherWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherWidth - * cudaDevAttrMaxTexture2DGatherHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherHeight # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture3DWidthAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DWidthAlt - * cudaDevAttrMaxTexture3DHeightAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DHeightAlt - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DGatherHeight)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 59, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if 
(__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DGatherHei, __pyx_t_4) < 0) __PYX_ERR(0, 59, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":60 - * cudaDevAttrMaxTexture2DGatherWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherWidth - * cudaDevAttrMaxTexture2DGatherHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherHeight - * cudaDevAttrMaxTexture3DWidthAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DWidthAlt # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture3DHeightAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DHeightAlt - * cudaDevAttrMaxTexture3DDepthAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DDepthAlt - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture3DWidthAlt)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 60, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture3DWidthAlt, __pyx_t_4) < 0) __PYX_ERR(0, 60, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":61 - * cudaDevAttrMaxTexture2DGatherHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherHeight - * cudaDevAttrMaxTexture3DWidthAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DWidthAlt - * cudaDevAttrMaxTexture3DHeightAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DHeightAlt # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture3DDepthAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DDepthAlt - * cudaDevAttrPciDomainId = cudaDeviceAttr.cudaDevAttrPciDomainId - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture3DHeightAlt)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 61, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture3DHeightAlt, __pyx_t_4) < 0) __PYX_ERR(0, 61, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":62 - * cudaDevAttrMaxTexture3DWidthAlt = 
cudaDeviceAttr.cudaDevAttrMaxTexture3DWidthAlt - * cudaDevAttrMaxTexture3DHeightAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DHeightAlt - * cudaDevAttrMaxTexture3DDepthAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DDepthAlt # <<<<<<<<<<<<<< - * cudaDevAttrPciDomainId = cudaDeviceAttr.cudaDevAttrPciDomainId - * cudaDevAttrTexturePitchAlignment = cudaDeviceAttr.cudaDevAttrTexturePitchAlignment - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture3DDepthAlt)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 62, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture3DDepthAlt, __pyx_t_4) < 0) __PYX_ERR(0, 62, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":63 - * cudaDevAttrMaxTexture3DHeightAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DHeightAlt - * cudaDevAttrMaxTexture3DDepthAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DDepthAlt - * cudaDevAttrPciDomainId = cudaDeviceAttr.cudaDevAttrPciDomainId # <<<<<<<<<<<<<< - * cudaDevAttrTexturePitchAlignment = cudaDeviceAttr.cudaDevAttrTexturePitchAlignment - * cudaDevAttrMaxTextureCubemapWidth = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapWidth - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrPciDomainId)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 63, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrPciDomainId, __pyx_t_4) < 0) __PYX_ERR(0, 63, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":64 - * cudaDevAttrMaxTexture3DDepthAlt = cudaDeviceAttr.cudaDevAttrMaxTexture3DDepthAlt - * cudaDevAttrPciDomainId = cudaDeviceAttr.cudaDevAttrPciDomainId - * cudaDevAttrTexturePitchAlignment = cudaDeviceAttr.cudaDevAttrTexturePitchAlignment # <<<<<<<<<<<<<< - * cudaDevAttrMaxTextureCubemapWidth = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapWidth - * 
cudaDevAttrMaxTextureCubemapLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredWidth - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrTexturePitchAlignment)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 64, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrTexturePitchAlignment, __pyx_t_4) < 0) __PYX_ERR(0, 64, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":65 - * cudaDevAttrPciDomainId = cudaDeviceAttr.cudaDevAttrPciDomainId - * cudaDevAttrTexturePitchAlignment = cudaDeviceAttr.cudaDevAttrTexturePitchAlignment - * cudaDevAttrMaxTextureCubemapWidth = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapWidth # <<<<<<<<<<<<<< - * cudaDevAttrMaxTextureCubemapLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredWidth - * cudaDevAttrMaxTextureCubemapLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredLayers - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTextureCubemapWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 65, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTextureCubemapWidt, __pyx_t_4) < 0) __PYX_ERR(0, 65, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":66 - * cudaDevAttrTexturePitchAlignment = cudaDeviceAttr.cudaDevAttrTexturePitchAlignment - * cudaDevAttrMaxTextureCubemapWidth = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapWidth - * cudaDevAttrMaxTextureCubemapLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredWidth # <<<<<<<<<<<<<< - * cudaDevAttrMaxTextureCubemapLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredLayers - * cudaDevAttrMaxSurface1DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface1DWidth - */ - __pyx_t_4 = 
__Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTextureCubemapLayeredWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 66, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTextureCubemapLaye, __pyx_t_4) < 0) __PYX_ERR(0, 66, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":67 - * cudaDevAttrMaxTextureCubemapWidth = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapWidth - * cudaDevAttrMaxTextureCubemapLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredWidth - * cudaDevAttrMaxTextureCubemapLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredLayers # <<<<<<<<<<<<<< - * cudaDevAttrMaxSurface1DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface1DWidth - * cudaDevAttrMaxSurface2DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface2DWidth - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTextureCubemapLayeredLayers)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTextureCubemapLaye_2, __pyx_t_4) < 0) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":68 - * cudaDevAttrMaxTextureCubemapLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredWidth - * cudaDevAttrMaxTextureCubemapLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredLayers - * cudaDevAttrMaxSurface1DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface1DWidth # <<<<<<<<<<<<<< - * cudaDevAttrMaxSurface2DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface2DWidth - * cudaDevAttrMaxSurface2DHeight = cudaDeviceAttr.cudaDevAttrMaxSurface2DHeight - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface1DWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 68, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if 
(__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface1DWidth, __pyx_t_4) < 0) __PYX_ERR(0, 68, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":69 - * cudaDevAttrMaxTextureCubemapLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredLayers - * cudaDevAttrMaxSurface1DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface1DWidth - * cudaDevAttrMaxSurface2DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface2DWidth # <<<<<<<<<<<<<< - * cudaDevAttrMaxSurface2DHeight = cudaDeviceAttr.cudaDevAttrMaxSurface2DHeight - * cudaDevAttrMaxSurface3DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface3DWidth - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface2DWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 69, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface2DWidth, __pyx_t_4) < 0) __PYX_ERR(0, 69, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":70 - * cudaDevAttrMaxSurface1DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface1DWidth - * cudaDevAttrMaxSurface2DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface2DWidth - * cudaDevAttrMaxSurface2DHeight = cudaDeviceAttr.cudaDevAttrMaxSurface2DHeight # <<<<<<<<<<<<<< - * cudaDevAttrMaxSurface3DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface3DWidth - * cudaDevAttrMaxSurface3DHeight = cudaDeviceAttr.cudaDevAttrMaxSurface3DHeight - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface2DHeight)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 70, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface2DHeight, __pyx_t_4) < 0) __PYX_ERR(0, 70, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":71 - * cudaDevAttrMaxSurface2DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface2DWidth - * cudaDevAttrMaxSurface2DHeight = 
cudaDeviceAttr.cudaDevAttrMaxSurface2DHeight - * cudaDevAttrMaxSurface3DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface3DWidth # <<<<<<<<<<<<<< - * cudaDevAttrMaxSurface3DHeight = cudaDeviceAttr.cudaDevAttrMaxSurface3DHeight - * cudaDevAttrMaxSurface3DDepth = cudaDeviceAttr.cudaDevAttrMaxSurface3DDepth - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface3DWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 71, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface3DWidth, __pyx_t_4) < 0) __PYX_ERR(0, 71, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":72 - * cudaDevAttrMaxSurface2DHeight = cudaDeviceAttr.cudaDevAttrMaxSurface2DHeight - * cudaDevAttrMaxSurface3DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface3DWidth - * cudaDevAttrMaxSurface3DHeight = cudaDeviceAttr.cudaDevAttrMaxSurface3DHeight # <<<<<<<<<<<<<< - * cudaDevAttrMaxSurface3DDepth = cudaDeviceAttr.cudaDevAttrMaxSurface3DDepth - * cudaDevAttrMaxSurface1DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredWidth - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface3DHeight)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 72, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface3DHeight, __pyx_t_4) < 0) __PYX_ERR(0, 72, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":73 - * cudaDevAttrMaxSurface3DWidth = cudaDeviceAttr.cudaDevAttrMaxSurface3DWidth - * cudaDevAttrMaxSurface3DHeight = cudaDeviceAttr.cudaDevAttrMaxSurface3DHeight - * cudaDevAttrMaxSurface3DDepth = cudaDeviceAttr.cudaDevAttrMaxSurface3DDepth # <<<<<<<<<<<<<< - * cudaDevAttrMaxSurface1DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredWidth - * cudaDevAttrMaxSurface1DLayeredLayers = 
cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredLayers - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface3DDepth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 73, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface3DDepth, __pyx_t_4) < 0) __PYX_ERR(0, 73, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":74 - * cudaDevAttrMaxSurface3DHeight = cudaDeviceAttr.cudaDevAttrMaxSurface3DHeight - * cudaDevAttrMaxSurface3DDepth = cudaDeviceAttr.cudaDevAttrMaxSurface3DDepth - * cudaDevAttrMaxSurface1DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredWidth # <<<<<<<<<<<<<< - * cudaDevAttrMaxSurface1DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredLayers - * cudaDevAttrMaxSurface2DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredWidth - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface1DLayeredWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 74, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface1DLayeredWi, __pyx_t_4) < 0) __PYX_ERR(0, 74, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":75 - * cudaDevAttrMaxSurface3DDepth = cudaDeviceAttr.cudaDevAttrMaxSurface3DDepth - * cudaDevAttrMaxSurface1DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredWidth - * cudaDevAttrMaxSurface1DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredLayers # <<<<<<<<<<<<<< - * cudaDevAttrMaxSurface2DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredWidth - * cudaDevAttrMaxSurface2DLayeredHeight = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredHeight - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface1DLayeredLayers)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 75, 
__pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface1DLayeredLa, __pyx_t_4) < 0) __PYX_ERR(0, 75, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":76 - * cudaDevAttrMaxSurface1DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredWidth - * cudaDevAttrMaxSurface1DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredLayers - * cudaDevAttrMaxSurface2DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredWidth # <<<<<<<<<<<<<< - * cudaDevAttrMaxSurface2DLayeredHeight = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredHeight - * cudaDevAttrMaxSurface2DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredLayers - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface2DLayeredWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 76, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface2DLayeredWi, __pyx_t_4) < 0) __PYX_ERR(0, 76, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":77 - * cudaDevAttrMaxSurface1DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredLayers - * cudaDevAttrMaxSurface2DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredWidth - * cudaDevAttrMaxSurface2DLayeredHeight = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredHeight # <<<<<<<<<<<<<< - * cudaDevAttrMaxSurface2DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredLayers - * cudaDevAttrMaxSurfaceCubemapWidth = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapWidth - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface2DLayeredHeight)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 77, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface2DLayeredHe, __pyx_t_4) < 0) __PYX_ERR(0, 77, __pyx_L1_error) - 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":78 - * cudaDevAttrMaxSurface2DLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredWidth - * cudaDevAttrMaxSurface2DLayeredHeight = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredHeight - * cudaDevAttrMaxSurface2DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredLayers # <<<<<<<<<<<<<< - * cudaDevAttrMaxSurfaceCubemapWidth = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapWidth - * cudaDevAttrMaxSurfaceCubemapLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredWidth - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurface2DLayeredLayers)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 78, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurface2DLayeredLa, __pyx_t_4) < 0) __PYX_ERR(0, 78, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":79 - * cudaDevAttrMaxSurface2DLayeredHeight = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredHeight - * cudaDevAttrMaxSurface2DLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredLayers - * cudaDevAttrMaxSurfaceCubemapWidth = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapWidth # <<<<<<<<<<<<<< - * cudaDevAttrMaxSurfaceCubemapLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredWidth - * cudaDevAttrMaxSurfaceCubemapLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredLayers - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurfaceCubemapWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 79, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurfaceCubemapWidt, __pyx_t_4) < 0) __PYX_ERR(0, 79, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":80 - * cudaDevAttrMaxSurface2DLayeredLayers = 
cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredLayers - * cudaDevAttrMaxSurfaceCubemapWidth = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapWidth - * cudaDevAttrMaxSurfaceCubemapLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredWidth # <<<<<<<<<<<<<< - * cudaDevAttrMaxSurfaceCubemapLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredLayers - * cudaDevAttrMaxTexture1DLinearWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DLinearWidth - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurfaceCubemapLayeredWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurfaceCubemapLaye, __pyx_t_4) < 0) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":81 - * cudaDevAttrMaxSurfaceCubemapWidth = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapWidth - * cudaDevAttrMaxSurfaceCubemapLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredWidth - * cudaDevAttrMaxSurfaceCubemapLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredLayers # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture1DLinearWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DLinearWidth - * cudaDevAttrMaxTexture2DLinearWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearWidth - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSurfaceCubemapLayeredLayers)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 81, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSurfaceCubemapLaye_2, __pyx_t_4) < 0) __PYX_ERR(0, 81, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":82 - * cudaDevAttrMaxSurfaceCubemapLayeredWidth = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredWidth - * cudaDevAttrMaxSurfaceCubemapLayeredLayers = 
cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredLayers - * cudaDevAttrMaxTexture1DLinearWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DLinearWidth # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture2DLinearWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearWidth - * cudaDevAttrMaxTexture2DLinearHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearHeight - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture1DLinearWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 82, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture1DLinearWid, __pyx_t_4) < 0) __PYX_ERR(0, 82, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":83 - * cudaDevAttrMaxSurfaceCubemapLayeredLayers = cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredLayers - * cudaDevAttrMaxTexture1DLinearWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DLinearWidth - * cudaDevAttrMaxTexture2DLinearWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearWidth # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture2DLinearHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearHeight - * cudaDevAttrMaxTexture2DLinearPitch = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearPitch - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DLinearWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 83, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DLinearWid, __pyx_t_4) < 0) __PYX_ERR(0, 83, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":84 - * cudaDevAttrMaxTexture1DLinearWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DLinearWidth - * cudaDevAttrMaxTexture2DLinearWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearWidth - * cudaDevAttrMaxTexture2DLinearHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearHeight # <<<<<<<<<<<<<< - * 
cudaDevAttrMaxTexture2DLinearPitch = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearPitch - * cudaDevAttrMaxTexture2DMipmappedWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedWidth - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DLinearHeight)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 84, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DLinearHei, __pyx_t_4) < 0) __PYX_ERR(0, 84, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":85 - * cudaDevAttrMaxTexture2DLinearWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearWidth - * cudaDevAttrMaxTexture2DLinearHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearHeight - * cudaDevAttrMaxTexture2DLinearPitch = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearPitch # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture2DMipmappedWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedWidth - * cudaDevAttrMaxTexture2DMipmappedHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedHeight - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DLinearPitch)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 85, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DLinearPit, __pyx_t_4) < 0) __PYX_ERR(0, 85, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":86 - * cudaDevAttrMaxTexture2DLinearHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearHeight - * cudaDevAttrMaxTexture2DLinearPitch = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearPitch - * cudaDevAttrMaxTexture2DMipmappedWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedWidth # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture2DMipmappedHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedHeight - * cudaDevAttrComputeCapabilityMajor = 
cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DMipmappedWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 86, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DMipmapped, __pyx_t_4) < 0) __PYX_ERR(0, 86, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":87 - * cudaDevAttrMaxTexture2DLinearPitch = cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearPitch - * cudaDevAttrMaxTexture2DMipmappedWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedWidth - * cudaDevAttrMaxTexture2DMipmappedHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedHeight # <<<<<<<<<<<<<< - * cudaDevAttrComputeCapabilityMajor = cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor - * cudaDevAttrComputeCapabilityMinor = cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture2DMipmappedHeight)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 87, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture2DMipmapped_2, __pyx_t_4) < 0) __PYX_ERR(0, 87, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":88 - * cudaDevAttrMaxTexture2DMipmappedWidth = cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedWidth - * cudaDevAttrMaxTexture2DMipmappedHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedHeight - * cudaDevAttrComputeCapabilityMajor = cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor # <<<<<<<<<<<<<< - * cudaDevAttrComputeCapabilityMinor = cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor - * cudaDevAttrMaxTexture1DMipmappedWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DMipmappedWidth - */ - __pyx_t_4 = 
__Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrComputeCapabilityMajor)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 88, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrComputeCapabilityMajo, __pyx_t_4) < 0) __PYX_ERR(0, 88, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":89 - * cudaDevAttrMaxTexture2DMipmappedHeight = cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedHeight - * cudaDevAttrComputeCapabilityMajor = cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor - * cudaDevAttrComputeCapabilityMinor = cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor # <<<<<<<<<<<<<< - * cudaDevAttrMaxTexture1DMipmappedWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DMipmappedWidth - * cudaDevAttrStreamPrioritiesSupported = cudaDeviceAttr.cudaDevAttrStreamPrioritiesSupported - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrComputeCapabilityMinor)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 89, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrComputeCapabilityMino, __pyx_t_4) < 0) __PYX_ERR(0, 89, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":90 - * cudaDevAttrComputeCapabilityMajor = cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor - * cudaDevAttrComputeCapabilityMinor = cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor - * cudaDevAttrMaxTexture1DMipmappedWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DMipmappedWidth # <<<<<<<<<<<<<< - * cudaDevAttrStreamPrioritiesSupported = cudaDeviceAttr.cudaDevAttrStreamPrioritiesSupported - * cudaDevAttrGlobalL1CacheSupported = cudaDeviceAttr.cudaDevAttrGlobalL1CacheSupported - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxTexture1DMipmappedWidth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 90, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxTexture1DMipmapped, __pyx_t_4) < 0) __PYX_ERR(0, 90, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":91 - * cudaDevAttrComputeCapabilityMinor = cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor - * cudaDevAttrMaxTexture1DMipmappedWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DMipmappedWidth - * cudaDevAttrStreamPrioritiesSupported = cudaDeviceAttr.cudaDevAttrStreamPrioritiesSupported # <<<<<<<<<<<<<< - * cudaDevAttrGlobalL1CacheSupported = cudaDeviceAttr.cudaDevAttrGlobalL1CacheSupported - * cudaDevAttrLocalL1CacheSupported = cudaDeviceAttr.cudaDevAttrLocalL1CacheSupported - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrStreamPrioritiesSupported)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 91, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrStreamPrioritiesSuppo, __pyx_t_4) < 0) __PYX_ERR(0, 91, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":92 - * cudaDevAttrMaxTexture1DMipmappedWidth = cudaDeviceAttr.cudaDevAttrMaxTexture1DMipmappedWidth - * cudaDevAttrStreamPrioritiesSupported = cudaDeviceAttr.cudaDevAttrStreamPrioritiesSupported - * cudaDevAttrGlobalL1CacheSupported = cudaDeviceAttr.cudaDevAttrGlobalL1CacheSupported # <<<<<<<<<<<<<< - * cudaDevAttrLocalL1CacheSupported = cudaDeviceAttr.cudaDevAttrLocalL1CacheSupported - * cudaDevAttrMaxSharedMemoryPerMultiprocessor = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerMultiprocessor - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrGlobalL1CacheSupported)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 92, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrGlobalL1CacheSupporte, __pyx_t_4) < 0) __PYX_ERR(0, 92, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); 
__pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":93 - * cudaDevAttrStreamPrioritiesSupported = cudaDeviceAttr.cudaDevAttrStreamPrioritiesSupported - * cudaDevAttrGlobalL1CacheSupported = cudaDeviceAttr.cudaDevAttrGlobalL1CacheSupported - * cudaDevAttrLocalL1CacheSupported = cudaDeviceAttr.cudaDevAttrLocalL1CacheSupported # <<<<<<<<<<<<<< - * cudaDevAttrMaxSharedMemoryPerMultiprocessor = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerMultiprocessor - * cudaDevAttrMaxRegistersPerMultiprocessor = cudaDeviceAttr.cudaDevAttrMaxRegistersPerMultiprocessor - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrLocalL1CacheSupported)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 93, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrLocalL1CacheSupported, __pyx_t_4) < 0) __PYX_ERR(0, 93, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":94 - * cudaDevAttrGlobalL1CacheSupported = cudaDeviceAttr.cudaDevAttrGlobalL1CacheSupported - * cudaDevAttrLocalL1CacheSupported = cudaDeviceAttr.cudaDevAttrLocalL1CacheSupported - * cudaDevAttrMaxSharedMemoryPerMultiprocessor = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerMultiprocessor # <<<<<<<<<<<<<< - * cudaDevAttrMaxRegistersPerMultiprocessor = cudaDeviceAttr.cudaDevAttrMaxRegistersPerMultiprocessor - * cudaDevAttrManagedMemory = cudaDeviceAttr.cudaDevAttrManagedMemory - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSharedMemoryPerMultiprocessor)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 94, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSharedMemoryPerMul, __pyx_t_4) < 0) __PYX_ERR(0, 94, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":95 - * cudaDevAttrLocalL1CacheSupported = cudaDeviceAttr.cudaDevAttrLocalL1CacheSupported - * 
cudaDevAttrMaxSharedMemoryPerMultiprocessor = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerMultiprocessor - * cudaDevAttrMaxRegistersPerMultiprocessor = cudaDeviceAttr.cudaDevAttrMaxRegistersPerMultiprocessor # <<<<<<<<<<<<<< - * cudaDevAttrManagedMemory = cudaDeviceAttr.cudaDevAttrManagedMemory - * cudaDevAttrIsMultiGpuBoard = cudaDeviceAttr.cudaDevAttrIsMultiGpuBoard - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxRegistersPerMultiprocessor)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 95, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxRegistersPerMultip, __pyx_t_4) < 0) __PYX_ERR(0, 95, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":96 - * cudaDevAttrMaxSharedMemoryPerMultiprocessor = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerMultiprocessor - * cudaDevAttrMaxRegistersPerMultiprocessor = cudaDeviceAttr.cudaDevAttrMaxRegistersPerMultiprocessor - * cudaDevAttrManagedMemory = cudaDeviceAttr.cudaDevAttrManagedMemory # <<<<<<<<<<<<<< - * cudaDevAttrIsMultiGpuBoard = cudaDeviceAttr.cudaDevAttrIsMultiGpuBoard - * cudaDevAttrMultiGpuBoardGroupID = cudaDeviceAttr.cudaDevAttrMultiGpuBoardGroupID - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrManagedMemory)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 96, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrManagedMemory, __pyx_t_4) < 0) __PYX_ERR(0, 96, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":97 - * cudaDevAttrMaxRegistersPerMultiprocessor = cudaDeviceAttr.cudaDevAttrMaxRegistersPerMultiprocessor - * cudaDevAttrManagedMemory = cudaDeviceAttr.cudaDevAttrManagedMemory - * cudaDevAttrIsMultiGpuBoard = cudaDeviceAttr.cudaDevAttrIsMultiGpuBoard # <<<<<<<<<<<<<< - * cudaDevAttrMultiGpuBoardGroupID = 
cudaDeviceAttr.cudaDevAttrMultiGpuBoardGroupID - * cudaDevAttrHostNativeAtomicSupported = cudaDeviceAttr.cudaDevAttrHostNativeAtomicSupported - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrIsMultiGpuBoard)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 97, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrIsMultiGpuBoard, __pyx_t_4) < 0) __PYX_ERR(0, 97, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":98 - * cudaDevAttrManagedMemory = cudaDeviceAttr.cudaDevAttrManagedMemory - * cudaDevAttrIsMultiGpuBoard = cudaDeviceAttr.cudaDevAttrIsMultiGpuBoard - * cudaDevAttrMultiGpuBoardGroupID = cudaDeviceAttr.cudaDevAttrMultiGpuBoardGroupID # <<<<<<<<<<<<<< - * cudaDevAttrHostNativeAtomicSupported = cudaDeviceAttr.cudaDevAttrHostNativeAtomicSupported - * cudaDevAttrSingleToDoublePrecisionPerfRatio = cudaDeviceAttr.cudaDevAttrSingleToDoublePrecisionPerfRatio - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMultiGpuBoardGroupID)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 98, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMultiGpuBoardGroupID, __pyx_t_4) < 0) __PYX_ERR(0, 98, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":99 - * cudaDevAttrIsMultiGpuBoard = cudaDeviceAttr.cudaDevAttrIsMultiGpuBoard - * cudaDevAttrMultiGpuBoardGroupID = cudaDeviceAttr.cudaDevAttrMultiGpuBoardGroupID - * cudaDevAttrHostNativeAtomicSupported = cudaDeviceAttr.cudaDevAttrHostNativeAtomicSupported # <<<<<<<<<<<<<< - * cudaDevAttrSingleToDoublePrecisionPerfRatio = cudaDeviceAttr.cudaDevAttrSingleToDoublePrecisionPerfRatio - * cudaDevAttrPageableMemoryAccess = cudaDeviceAttr.cudaDevAttrPageableMemoryAccess - */ - __pyx_t_4 = 
__Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrHostNativeAtomicSupported)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrHostNativeAtomicSuppo, __pyx_t_4) < 0) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":100 - * cudaDevAttrMultiGpuBoardGroupID = cudaDeviceAttr.cudaDevAttrMultiGpuBoardGroupID - * cudaDevAttrHostNativeAtomicSupported = cudaDeviceAttr.cudaDevAttrHostNativeAtomicSupported - * cudaDevAttrSingleToDoublePrecisionPerfRatio = cudaDeviceAttr.cudaDevAttrSingleToDoublePrecisionPerfRatio # <<<<<<<<<<<<<< - * cudaDevAttrPageableMemoryAccess = cudaDeviceAttr.cudaDevAttrPageableMemoryAccess - * cudaDevAttrConcurrentManagedAccess = cudaDeviceAttr.cudaDevAttrConcurrentManagedAccess - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrSingleToDoublePrecisionPerfRatio)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 100, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrSingleToDoublePrecisi, __pyx_t_4) < 0) __PYX_ERR(0, 100, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":101 - * cudaDevAttrHostNativeAtomicSupported = cudaDeviceAttr.cudaDevAttrHostNativeAtomicSupported - * cudaDevAttrSingleToDoublePrecisionPerfRatio = cudaDeviceAttr.cudaDevAttrSingleToDoublePrecisionPerfRatio - * cudaDevAttrPageableMemoryAccess = cudaDeviceAttr.cudaDevAttrPageableMemoryAccess # <<<<<<<<<<<<<< - * cudaDevAttrConcurrentManagedAccess = cudaDeviceAttr.cudaDevAttrConcurrentManagedAccess - * cudaDevAttrComputePreemptionSupported = cudaDeviceAttr.cudaDevAttrComputePreemptionSupported - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrPageableMemoryAccess)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 101, 
__pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrPageableMemoryAccess, __pyx_t_4) < 0) __PYX_ERR(0, 101, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":102 - * cudaDevAttrSingleToDoublePrecisionPerfRatio = cudaDeviceAttr.cudaDevAttrSingleToDoublePrecisionPerfRatio - * cudaDevAttrPageableMemoryAccess = cudaDeviceAttr.cudaDevAttrPageableMemoryAccess - * cudaDevAttrConcurrentManagedAccess = cudaDeviceAttr.cudaDevAttrConcurrentManagedAccess # <<<<<<<<<<<<<< - * cudaDevAttrComputePreemptionSupported = cudaDeviceAttr.cudaDevAttrComputePreemptionSupported - * cudaDevAttrCanUseHostPointerForRegisteredMem = cudaDeviceAttr.cudaDevAttrCanUseHostPointerForRegisteredMem - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrConcurrentManagedAccess)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 102, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrConcurrentManagedAcce, __pyx_t_4) < 0) __PYX_ERR(0, 102, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":103 - * cudaDevAttrPageableMemoryAccess = cudaDeviceAttr.cudaDevAttrPageableMemoryAccess - * cudaDevAttrConcurrentManagedAccess = cudaDeviceAttr.cudaDevAttrConcurrentManagedAccess - * cudaDevAttrComputePreemptionSupported = cudaDeviceAttr.cudaDevAttrComputePreemptionSupported # <<<<<<<<<<<<<< - * cudaDevAttrCanUseHostPointerForRegisteredMem = cudaDeviceAttr.cudaDevAttrCanUseHostPointerForRegisteredMem - * cudaDevAttrReserved92 = cudaDeviceAttr.cudaDevAttrReserved92 - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrComputePreemptionSupported)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 103, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrComputePreemptionSupp, __pyx_t_4) < 0) __PYX_ERR(0, 103, 
__pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":104 - * cudaDevAttrConcurrentManagedAccess = cudaDeviceAttr.cudaDevAttrConcurrentManagedAccess - * cudaDevAttrComputePreemptionSupported = cudaDeviceAttr.cudaDevAttrComputePreemptionSupported - * cudaDevAttrCanUseHostPointerForRegisteredMem = cudaDeviceAttr.cudaDevAttrCanUseHostPointerForRegisteredMem # <<<<<<<<<<<<<< - * cudaDevAttrReserved92 = cudaDeviceAttr.cudaDevAttrReserved92 - * cudaDevAttrReserved93 = cudaDeviceAttr.cudaDevAttrReserved93 - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrCanUseHostPointerForRegisteredMem)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 104, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrCanUseHostPointerForR, __pyx_t_4) < 0) __PYX_ERR(0, 104, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":105 - * cudaDevAttrComputePreemptionSupported = cudaDeviceAttr.cudaDevAttrComputePreemptionSupported - * cudaDevAttrCanUseHostPointerForRegisteredMem = cudaDeviceAttr.cudaDevAttrCanUseHostPointerForRegisteredMem - * cudaDevAttrReserved92 = cudaDeviceAttr.cudaDevAttrReserved92 # <<<<<<<<<<<<<< - * cudaDevAttrReserved93 = cudaDeviceAttr.cudaDevAttrReserved93 - * cudaDevAttrReserved94 = cudaDeviceAttr.cudaDevAttrReserved94 - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrReserved92)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 105, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrReserved92, __pyx_t_4) < 0) __PYX_ERR(0, 105, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":106 - * cudaDevAttrCanUseHostPointerForRegisteredMem = cudaDeviceAttr.cudaDevAttrCanUseHostPointerForRegisteredMem - * cudaDevAttrReserved92 = cudaDeviceAttr.cudaDevAttrReserved92 - * cudaDevAttrReserved93 = 
cudaDeviceAttr.cudaDevAttrReserved93 # <<<<<<<<<<<<<< - * cudaDevAttrReserved94 = cudaDeviceAttr.cudaDevAttrReserved94 - * cudaDevAttrCooperativeLaunch = cudaDeviceAttr.cudaDevAttrCooperativeLaunch - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrReserved93)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrReserved93, __pyx_t_4) < 0) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":107 - * cudaDevAttrReserved92 = cudaDeviceAttr.cudaDevAttrReserved92 - * cudaDevAttrReserved93 = cudaDeviceAttr.cudaDevAttrReserved93 - * cudaDevAttrReserved94 = cudaDeviceAttr.cudaDevAttrReserved94 # <<<<<<<<<<<<<< - * cudaDevAttrCooperativeLaunch = cudaDeviceAttr.cudaDevAttrCooperativeLaunch - * cudaDevAttrCooperativeMultiDeviceLaunch = cudaDeviceAttr.cudaDevAttrCooperativeMultiDeviceLaunch - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrReserved94)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 107, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrReserved94, __pyx_t_4) < 0) __PYX_ERR(0, 107, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":108 - * cudaDevAttrReserved93 = cudaDeviceAttr.cudaDevAttrReserved93 - * cudaDevAttrReserved94 = cudaDeviceAttr.cudaDevAttrReserved94 - * cudaDevAttrCooperativeLaunch = cudaDeviceAttr.cudaDevAttrCooperativeLaunch # <<<<<<<<<<<<<< - * cudaDevAttrCooperativeMultiDeviceLaunch = cudaDeviceAttr.cudaDevAttrCooperativeMultiDeviceLaunch - * cudaDevAttrMaxSharedMemoryPerBlockOptin = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlockOptin - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrCooperativeLaunch)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 108, 
__pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrCooperativeLaunch, __pyx_t_4) < 0) __PYX_ERR(0, 108, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":109 - * cudaDevAttrReserved94 = cudaDeviceAttr.cudaDevAttrReserved94 - * cudaDevAttrCooperativeLaunch = cudaDeviceAttr.cudaDevAttrCooperativeLaunch - * cudaDevAttrCooperativeMultiDeviceLaunch = cudaDeviceAttr.cudaDevAttrCooperativeMultiDeviceLaunch # <<<<<<<<<<<<<< - * cudaDevAttrMaxSharedMemoryPerBlockOptin = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlockOptin - * cudaDevAttrCanFlushRemoteWrites = cudaDeviceAttr.cudaDevAttrCanFlushRemoteWrites - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrCooperativeMultiDeviceLaunch)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 109, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrCooperativeMultiDevic, __pyx_t_4) < 0) __PYX_ERR(0, 109, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":110 - * cudaDevAttrCooperativeLaunch = cudaDeviceAttr.cudaDevAttrCooperativeLaunch - * cudaDevAttrCooperativeMultiDeviceLaunch = cudaDeviceAttr.cudaDevAttrCooperativeMultiDeviceLaunch - * cudaDevAttrMaxSharedMemoryPerBlockOptin = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlockOptin # <<<<<<<<<<<<<< - * cudaDevAttrCanFlushRemoteWrites = cudaDeviceAttr.cudaDevAttrCanFlushRemoteWrites - * cudaDevAttrHostRegisterSupported = cudaDeviceAttr.cudaDevAttrHostRegisterSupported - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrMaxSharedMemoryPerBlockOptin)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 110, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrMaxSharedMemoryPerBlo_2, __pyx_t_4) < 0) __PYX_ERR(0, 110, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* 
"cudf/_cuda/gpu.pyx":111 - * cudaDevAttrCooperativeMultiDeviceLaunch = cudaDeviceAttr.cudaDevAttrCooperativeMultiDeviceLaunch - * cudaDevAttrMaxSharedMemoryPerBlockOptin = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlockOptin - * cudaDevAttrCanFlushRemoteWrites = cudaDeviceAttr.cudaDevAttrCanFlushRemoteWrites # <<<<<<<<<<<<<< - * cudaDevAttrHostRegisterSupported = cudaDeviceAttr.cudaDevAttrHostRegisterSupported - * cudaDevAttrPageableMemoryAccessUsesHostPageTables = cudaDeviceAttr.cudaDevAttrPageableMemoryAccessUsesHostPageTables - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrCanFlushRemoteWrites)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 111, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrCanFlushRemoteWrites, __pyx_t_4) < 0) __PYX_ERR(0, 111, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":112 - * cudaDevAttrMaxSharedMemoryPerBlockOptin = cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlockOptin - * cudaDevAttrCanFlushRemoteWrites = cudaDeviceAttr.cudaDevAttrCanFlushRemoteWrites - * cudaDevAttrHostRegisterSupported = cudaDeviceAttr.cudaDevAttrHostRegisterSupported # <<<<<<<<<<<<<< - * cudaDevAttrPageableMemoryAccessUsesHostPageTables = cudaDeviceAttr.cudaDevAttrPageableMemoryAccessUsesHostPageTables - * cudaDevAttrDirectManagedMemAccessFromHost = cudaDeviceAttr.cudaDevAttrDirectManagedMemAccessFromHost - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrHostRegisterSupported)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 112, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrHostRegisterSupported, __pyx_t_4) < 0) __PYX_ERR(0, 112, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":113 - * cudaDevAttrCanFlushRemoteWrites = cudaDeviceAttr.cudaDevAttrCanFlushRemoteWrites - * 
cudaDevAttrHostRegisterSupported = cudaDeviceAttr.cudaDevAttrHostRegisterSupported - * cudaDevAttrPageableMemoryAccessUsesHostPageTables = cudaDeviceAttr.cudaDevAttrPageableMemoryAccessUsesHostPageTables # <<<<<<<<<<<<<< - * cudaDevAttrDirectManagedMemAccessFromHost = cudaDeviceAttr.cudaDevAttrDirectManagedMemAccessFromHost - * - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrPageableMemoryAccessUsesHostPageTables)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 113, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrPageableMemoryAccessU, __pyx_t_4) < 0) __PYX_ERR(0, 113, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":114 - * cudaDevAttrHostRegisterSupported = cudaDeviceAttr.cudaDevAttrHostRegisterSupported - * cudaDevAttrPageableMemoryAccessUsesHostPageTables = cudaDeviceAttr.cudaDevAttrPageableMemoryAccessUsesHostPageTables - * cudaDevAttrDirectManagedMemAccessFromHost = cudaDeviceAttr.cudaDevAttrDirectManagedMemAccessFromHost # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_4 = __Pyx_PyInt_From_int(((__pyx_t_4cudf_5_cuda_3gpu_underlying_type_attribute)cudaDevAttrDirectManagedMemAccessFromHost)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 114, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_SetNameInClass(__pyx_t_3, __pyx_n_s_cudaDevAttrDirectManagedMemAcces, __pyx_t_4) < 0) __PYX_ERR(0, 114, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "cudf/_cuda/gpu.pyx":14 - * - * - * class CudaDeviceAttr(IntEnum): # <<<<<<<<<<<<<< - * cudaDevAttrMaxThreadsPerBlock = cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock - * cudaDevAttrMaxBlockDimX = cudaDeviceAttr.cudaDevAttrMaxBlockDimX - */ - __pyx_t_4 = __Pyx_Py3ClassCreate(__pyx_t_2, __pyx_n_s_CudaDeviceAttr, __pyx_t_1, __pyx_t_3, NULL, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (PyDict_SetItem(__pyx_d, 
__pyx_n_s_CudaDeviceAttr, __pyx_t_4) < 0) __PYX_ERR(0, 14, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "cudf/_cuda/gpu.pyx":1 - * # Copyright (c) 2020, NVIDIA CORPORATION. # <<<<<<<<<<<<<< - * - * from cudf._cuda.gpu cimport ( - */ - __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /*--- Wrapped vars code ---*/ - - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - if (__pyx_m) { - if (__pyx_d) { - __Pyx_AddTraceback("init cudf._cuda.gpu", __pyx_clineno, __pyx_lineno, __pyx_filename); - } - Py_CLEAR(__pyx_m); - } else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init cudf._cuda.gpu"); - } - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #if CYTHON_PEP489_MULTI_PHASE_INIT - return (__pyx_m != NULL) ? 
0 : -1; - #elif PY_MAJOR_VERSION >= 3 - return __pyx_m; - #else - return; - #endif -} - -/* --- Runtime support code --- */ -/* Refnanny */ -#if CYTHON_REFNANNY -static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = PyImport_ImportModule(modname); - if (!m) goto end; - p = PyObject_GetAttrString(m, "RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif - -/* RaiseArgTupleInvalid */ -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - if (num_found < num_min) { - num_expected = num_min; - more_or_less = "at least"; - } else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? 
"" : "s", num_found); -} - -/* RaiseDoubleKeywords */ -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AsString(kw_name)); - #endif -} - -/* ParseKeywords */ -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - while (PyDict_Next(kwds, &pos, &key, &value)) { - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - continue; - } - name = first_kw_arg; - #if PY_MAJOR_VERSION < 3 - if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { - while (*name) { - if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) - && _PyString_Eq(**name, key)) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - if ((**argname == key) || ( - (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) - && _PyString_Eq(**argname, key))) { - goto arg_passed_twice; - } - argname++; - } - } - } else - #endif - if (likely(PyUnicode_Check(key))) { - while (*name) { - int cmp = (**name == key) ? 0 : - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 
1 : - #endif - PyUnicode_Compare(**name, key); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - int cmp = (**argname == key) ? 0 : - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : - #endif - PyUnicode_Compare(**argname, key); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) goto arg_passed_twice; - argname++; - } - } - } else - goto invalid_keyword_type; - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; - } - } - return 0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, key); - goto bad; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%.200s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION < 3 - "%.200s() got an unexpected keyword argument '%.200s'", - function_name, PyString_AsString(key)); - #else - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - -/* PyObjectGetAttrStr */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro)) - return tp->tp_getattro(obj, attr_name); -#if PY_MAJOR_VERSION < 3 - if (likely(tp->tp_getattr)) - return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); -#endif - return PyObject_GetAttr(obj, attr_name); -} -#endif - -/* Import */ -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { - PyObject *empty_list = 0; - PyObject *module = 0; - PyObject *global_dict = 0; - PyObject *empty_dict = 0; - PyObject *list; - #if PY_MAJOR_VERSION < 3 - PyObject *py_import; - py_import = 
__Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); - if (!py_import) - goto bad; - #endif - if (from_list) - list = from_list; - else { - empty_list = PyList_New(0); - if (!empty_list) - goto bad; - list = empty_list; - } - global_dict = PyModule_GetDict(__pyx_m); - if (!global_dict) - goto bad; - empty_dict = PyDict_New(); - if (!empty_dict) - goto bad; - { - #if PY_MAJOR_VERSION >= 3 - if (level == -1) { - if (strchr(__Pyx_MODULE_NAME, '.')) { - module = PyImport_ImportModuleLevelObject( - name, global_dict, empty_dict, list, 1); - if (!module) { - if (!PyErr_ExceptionMatches(PyExc_ImportError)) - goto bad; - PyErr_Clear(); - } - } - level = 0; - } - #endif - if (!module) { - #if PY_MAJOR_VERSION < 3 - PyObject *py_level = PyInt_FromLong(level); - if (!py_level) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); - Py_DECREF(py_level); - #else - module = PyImport_ImportModuleLevelObject( - name, global_dict, empty_dict, list, level); - #endif - } - } -bad: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(py_import); - #endif - Py_XDECREF(empty_list); - Py_XDECREF(empty_dict); - return module; -} - -/* ImportFrom */ -static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { - PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); - if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Format(PyExc_ImportError, - #if PY_MAJOR_VERSION < 3 - "cannot import name %.230s", PyString_AS_STRING(name)); - #else - "cannot import name %S", name); - #endif - } - return value; -} - -/* GetBuiltinName */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name) { - PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); - if (unlikely(!result)) { - PyErr_Format(PyExc_NameError, -#if PY_MAJOR_VERSION >= 3 - "name '%U' is not defined", name); -#else - "name '%.200s' is not defined", PyString_AS_STRING(name)); -#endif - } - return result; -} - -/* PyDictVersioning */ 
-#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; -} -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { - PyObject **dictptr = NULL; - Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; - if (offset) { -#if CYTHON_COMPILING_IN_CPYTHON - dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); -#else - dictptr = _PyObject_GetDictPtr(obj); -#endif - } - return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; -} -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) - return 0; - return obj_dict_version == __Pyx_get_object_dict_version(obj); -} -#endif - -/* GetModuleGlobalName */ -#if CYTHON_USE_DICT_VERSIONS -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) -#else -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) -#endif -{ - PyObject *result; -#if !CYTHON_AVOID_BORROWED_REFS -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 - result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } else if (unlikely(PyErr_Occurred())) { - return NULL; - } -#else - result = PyDict_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } -#endif -#else - result = PyObject_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, 
*dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } - PyErr_Clear(); -#endif - return __Pyx_GetBuiltinName(name); -} - -/* CalculateMetaclass */ -static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases) { - Py_ssize_t i, nbases = PyTuple_GET_SIZE(bases); - for (i=0; i < nbases; i++) { - PyTypeObject *tmptype; - PyObject *tmp = PyTuple_GET_ITEM(bases, i); - tmptype = Py_TYPE(tmp); -#if PY_MAJOR_VERSION < 3 - if (tmptype == &PyClass_Type) - continue; -#endif - if (!metaclass) { - metaclass = tmptype; - continue; - } - if (PyType_IsSubtype(metaclass, tmptype)) - continue; - if (PyType_IsSubtype(tmptype, metaclass)) { - metaclass = tmptype; - continue; - } - PyErr_SetString(PyExc_TypeError, - "metaclass conflict: " - "the metaclass of a derived class " - "must be a (non-strict) subclass " - "of the metaclasses of all its bases"); - return NULL; - } - if (!metaclass) { -#if PY_MAJOR_VERSION < 3 - metaclass = &PyClass_Type; -#else - metaclass = &PyType_Type; -#endif - } - Py_INCREF((PyObject*) metaclass); - return (PyObject*) metaclass; -} - -/* Py3ClassCreate */ -static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, - PyObject *qualname, PyObject *mkw, PyObject *modname, PyObject *doc) { - PyObject *ns; - if (metaclass) { - PyObject *prep = __Pyx_PyObject_GetAttrStr(metaclass, __pyx_n_s_prepare); - if (prep) { - PyObject *pargs = PyTuple_Pack(2, name, bases); - if (unlikely(!pargs)) { - Py_DECREF(prep); - return NULL; - } - ns = PyObject_Call(prep, pargs, mkw); - Py_DECREF(prep); - Py_DECREF(pargs); - } else { - if (unlikely(!PyErr_ExceptionMatches(PyExc_AttributeError))) - return NULL; - PyErr_Clear(); - ns = PyDict_New(); - } - } else { - ns = PyDict_New(); - } - if (unlikely(!ns)) - return NULL; - if (unlikely(PyObject_SetItem(ns, __pyx_n_s_module, modname) < 0)) goto bad; - if (unlikely(PyObject_SetItem(ns, __pyx_n_s_qualname, qualname) < 0)) goto bad; - 
if (unlikely(doc && PyObject_SetItem(ns, __pyx_n_s_doc, doc) < 0)) goto bad; - return ns; -bad: - Py_DECREF(ns); - return NULL; -} -static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, - PyObject *dict, PyObject *mkw, - int calculate_metaclass, int allow_py2_metaclass) { - PyObject *result, *margs; - PyObject *owned_metaclass = NULL; - if (allow_py2_metaclass) { - owned_metaclass = PyObject_GetItem(dict, __pyx_n_s_metaclass); - if (owned_metaclass) { - metaclass = owned_metaclass; - } else if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) { - PyErr_Clear(); - } else { - return NULL; - } - } - if (calculate_metaclass && (!metaclass || PyType_Check(metaclass))) { - metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases); - Py_XDECREF(owned_metaclass); - if (unlikely(!metaclass)) - return NULL; - owned_metaclass = metaclass; - } - margs = PyTuple_Pack(3, name, bases, dict); - if (unlikely(!margs)) { - result = NULL; - } else { - result = PyObject_Call(metaclass, margs, mkw); - Py_DECREF(margs); - } - Py_XDECREF(owned_metaclass); - return result; -} - -/* PyErrFetchRestore */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = tstate->curexc_traceback; - tstate->curexc_type = type; - tstate->curexc_value = value; - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -} -#endif - -/* CLineInTraceback */ -#ifndef CYTHON_CLINE_IN_TRACEBACK 
-static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) { - PyObject *use_cline; - PyObject *ptype, *pvalue, *ptraceback; -#if CYTHON_COMPILING_IN_CPYTHON - PyObject **cython_runtime_dict; -#endif - if (unlikely(!__pyx_cython_runtime)) { - return c_line; - } - __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); -#if CYTHON_COMPILING_IN_CPYTHON - cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); - if (likely(cython_runtime_dict)) { - __PYX_PY_DICT_LOOKUP_IF_MODIFIED( - use_cline, *cython_runtime_dict, - __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) - } else -#endif - { - PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); - if (use_cline_obj) { - use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; - Py_DECREF(use_cline_obj); - } else { - PyErr_Clear(); - use_cline = NULL; - } - } - if (!use_cline) { - c_line = 0; - PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); - } - else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { - c_line = 0; - } - __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); - return c_line; -} -#endif - -/* CodeObjectCache */ -static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { - int start = 0, mid = 0, end = count - 1; - if (end >= 0 && code_line > entries[end].code_line) { - return count; - } - while (start < end) { - mid = start + (end - start) / 2; - if (code_line < entries[mid].code_line) { - end = mid; - } else if (code_line > entries[mid].code_line) { - start = mid + 1; - } else { - return mid; - } - } - if (code_line <= entries[mid].code_line) { - return mid; - } else { - return mid + 1; - } -} -static PyCodeObject *__pyx_find_code_object(int code_line) { - PyCodeObject* code_object; - int pos; - if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { - return NULL; - } - 
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { - return NULL; - } - code_object = __pyx_code_cache.entries[pos].code_object; - Py_INCREF(code_object); - return code_object; -} -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { - int pos, i; - __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; - if (unlikely(!code_line)) { - return; - } - if (unlikely(!entries)) { - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); - if (likely(entries)) { - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = 64; - __pyx_code_cache.count = 1; - entries[0].code_line = code_line; - entries[0].code_object = code_object; - Py_INCREF(code_object); - } - return; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { - PyCodeObject* tmp = entries[pos].code_object; - entries[pos].code_object = code_object; - Py_DECREF(tmp); - return; - } - if (__pyx_code_cache.count == __pyx_code_cache.max_count) { - int new_max = __pyx_code_cache.max_count + 64; - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( - __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); - if (unlikely(!entries)) { - return; - } - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = new_max; - } - for (i=__pyx_code_cache.count; i>pos; i--) { - entries[i] = entries[i-1]; - } - entries[pos].code_line = code_line; - entries[pos].code_object = code_object; - __pyx_code_cache.count++; - Py_INCREF(code_object); -} - -/* AddTraceback */ -#include "compile.h" -#include "frameobject.h" -#include "traceback.h" -static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( - const char 
*funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = 0; - PyObject *py_srcfile = 0; - PyObject *py_funcname = 0; - #if PY_MAJOR_VERSION < 3 - py_srcfile = PyString_FromString(filename); - #else - py_srcfile = PyUnicode_FromString(filename); - #endif - if (!py_srcfile) goto bad; - if (c_line) { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - #else - py_funcname = PyUnicode_FromString(funcname); - #endif - } - if (!py_funcname) goto bad; - py_code = __Pyx_PyCode_New( - 0, - 0, - 0, - 0, - 0, - __pyx_empty_bytes, /*PyObject *code,*/ - __pyx_empty_tuple, /*PyObject *consts,*/ - __pyx_empty_tuple, /*PyObject *names,*/ - __pyx_empty_tuple, /*PyObject *varnames,*/ - __pyx_empty_tuple, /*PyObject *freevars,*/ - __pyx_empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - py_line, - __pyx_empty_bytes /*PyObject *lnotab*/ - ); - Py_DECREF(py_srcfile); - Py_DECREF(py_funcname); - return py_code; -bad: - Py_XDECREF(py_srcfile); - Py_XDECREF(py_funcname); - return NULL; -} -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - if (c_line) { - c_line = __Pyx_CLineForTraceback(tstate, c_line); - } - py_code = __pyx_find_code_object(c_line ? -c_line : py_line); - if (!py_code) { - py_code = __Pyx_CreateCodeObjectForTraceback( - funcname, c_line, py_line, filename); - if (!py_code) goto bad; - __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); - } - py_frame = PyFrame_New( - tstate, /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - __pyx_d, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - __Pyx_PyFrame_SetLineNumber(py_frame, py_line); - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} - -/* CIntToPy */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { - const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; - const int is_unsigned = neg_one > const_zero; - if (is_unsigned) { - if (sizeof(int) < sizeof(long)) { - return PyInt_FromLong((long) value); - } else if (sizeof(int) <= sizeof(unsigned long)) { - return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif - } - } else { - if (sizeof(int) <= sizeof(long)) { - return PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); -#endif - } - } - { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&value; - return _PyLong_FromByteArray(bytes, sizeof(int), - little, !is_unsigned); - } -} - -/* CIntFromPyVerify */ -#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) -#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) -#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ - {\ - func_type value = func_value;\ - if (sizeof(target_type) < sizeof(func_type)) {\ - if (unlikely(value != (func_type) (target_type) value)) {\ - func_type zero = 0;\ - if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ - return (target_type) 
-1;\ - if (is_unsigned && unlikely(value < zero))\ - goto raise_neg_overflow;\ - else\ - goto raise_overflow;\ - }\ - }\ - return (target_type) value;\ - } - -/* CIntFromPy */ -static CYTHON_INLINE enum cudaDeviceAttr __Pyx_PyInt_As_enum__cudaDeviceAttr(PyObject *x) { - const enum cudaDeviceAttr neg_one = (enum cudaDeviceAttr) ((enum cudaDeviceAttr) 0 - (enum cudaDeviceAttr) 1), const_zero = (enum cudaDeviceAttr) 0; - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(enum cudaDeviceAttr) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (enum cudaDeviceAttr) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (enum cudaDeviceAttr) 0; - case 1: __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, digit, digits[0]) - case 2: - if (8 * sizeof(enum cudaDeviceAttr) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(enum cudaDeviceAttr) >= 2 * PyLong_SHIFT) { - return (enum cudaDeviceAttr) (((((enum cudaDeviceAttr)digits[1]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(enum cudaDeviceAttr) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(enum cudaDeviceAttr) >= 3 * PyLong_SHIFT) { - return (enum cudaDeviceAttr) (((((((enum 
cudaDeviceAttr)digits[2]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[1]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(enum cudaDeviceAttr) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(enum cudaDeviceAttr) >= 4 * PyLong_SHIFT) { - return (enum cudaDeviceAttr) (((((((((enum cudaDeviceAttr)digits[3]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[2]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[1]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (enum cudaDeviceAttr) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if (sizeof(enum cudaDeviceAttr) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(enum cudaDeviceAttr, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(enum cudaDeviceAttr) <= sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(enum cudaDeviceAttr, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (enum cudaDeviceAttr) 0; - case -1: __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, digit, +digits[0]) - case -2: - if (8 * sizeof(enum cudaDeviceAttr) - 1 > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(enum 
cudaDeviceAttr, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(enum cudaDeviceAttr) - 1 > 2 * PyLong_SHIFT) { - return (enum cudaDeviceAttr) (((enum cudaDeviceAttr)-1)*(((((enum cudaDeviceAttr)digits[1]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(enum cudaDeviceAttr) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(enum cudaDeviceAttr) - 1 > 2 * PyLong_SHIFT) { - return (enum cudaDeviceAttr) ((((((enum cudaDeviceAttr)digits[1]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(enum cudaDeviceAttr) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(enum cudaDeviceAttr) - 1 > 3 * PyLong_SHIFT) { - return (enum cudaDeviceAttr) (((enum cudaDeviceAttr)-1)*(((((((enum cudaDeviceAttr)digits[2]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[1]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(enum cudaDeviceAttr) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(enum cudaDeviceAttr) - 1 > 3 * PyLong_SHIFT) { - return (enum cudaDeviceAttr) ((((((((enum cudaDeviceAttr)digits[2]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[1]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[0]))); - } - } - break; - case 
-4: - if (8 * sizeof(enum cudaDeviceAttr) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(enum cudaDeviceAttr) - 1 > 4 * PyLong_SHIFT) { - return (enum cudaDeviceAttr) (((enum cudaDeviceAttr)-1)*(((((((((enum cudaDeviceAttr)digits[3]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[2]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[1]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(enum cudaDeviceAttr) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(enum cudaDeviceAttr, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(enum cudaDeviceAttr) - 1 > 4 * PyLong_SHIFT) { - return (enum cudaDeviceAttr) ((((((((((enum cudaDeviceAttr)digits[3]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[2]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[1]) << PyLong_SHIFT) | (enum cudaDeviceAttr)digits[0]))); - } - } - break; - } -#endif - if (sizeof(enum cudaDeviceAttr) <= sizeof(long)) { - __PYX_VERIFY_RETURN_INT_EXC(enum cudaDeviceAttr, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(enum cudaDeviceAttr) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(enum cudaDeviceAttr, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - enum cudaDeviceAttr val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if 
(likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (enum cudaDeviceAttr) -1; - } - } else { - enum cudaDeviceAttr val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (enum cudaDeviceAttr) -1; - val = __Pyx_PyInt_As_enum__cudaDeviceAttr(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to enum cudaDeviceAttr"); - return (enum cudaDeviceAttr) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to enum cudaDeviceAttr"); - return (enum cudaDeviceAttr) -1; -} - -/* CIntFromPy */ -static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { - const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(int) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (int) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (int) 0; - case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) - case 2: - if (8 * sizeof(int) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { - 
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(int) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { - return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(int) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { - return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (int) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if (sizeof(int) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (int) 0; - case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) - case -2: - if (8 * 
sizeof(int) - 1 > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(int) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(int) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -4: - if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned 
long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(int) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { - return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - } -#endif - if (sizeof(int) <= sizeof(long)) { - __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - int val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (int) -1; - } - } else { - int val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (int) -1; - val = __Pyx_PyInt_As_int(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value 
too large to convert to int"); - return (int) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to int"); - return (int) -1; -} - -/* CIntToPy */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { - const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; - const int is_unsigned = neg_one > const_zero; - if (is_unsigned) { - if (sizeof(long) < sizeof(long)) { - return PyInt_FromLong((long) value); - } else if (sizeof(long) <= sizeof(unsigned long)) { - return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif - } - } else { - if (sizeof(long) <= sizeof(long)) { - return PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); -#endif - } - } - { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&value; - return _PyLong_FromByteArray(bytes, sizeof(long), - little, !is_unsigned); - } -} - -/* CIntFromPy */ -static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { - const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(long) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (long) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (long) 0; - case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) - case 2: - if (8 * sizeof(long) > 1 * 
PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { - return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(long) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { - return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(long) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { - return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (long) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if (sizeof(long) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if 
CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (long) 0; - case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) - case -2: - if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(long) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(long) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - 
break; - case -4: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(long) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - } -#endif - if (sizeof(long) <= sizeof(long)) { - __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - long val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - 
is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (long) -1; - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (long) -1; - val = __Pyx_PyInt_As_long(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to long"); - return (long) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long) -1; -} - -/* FastTypeChecks */ -#if CYTHON_COMPILING_IN_CPYTHON -static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { - while (a) { - a = a->tp_base; - if (a == b) - return 1; - } - return b == &PyBaseObject_Type; -} -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { - PyObject *mro; - if (a == b) return 1; - mro = a->tp_mro; - if (likely(mro)) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(mro); - for (i = 0; i < n; i++) { - if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) - return 1; - } - return 0; - } - return __Pyx_InBases(a, b); -} -#if PY_MAJOR_VERSION == 2 -static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { - PyObject *exception, *value, *tb; - int res; - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ErrFetch(&exception, &value, &tb); - res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - if (!res) { - res = PyObject_IsSubclass(err, exc_type2); - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - } - __Pyx_ErrRestore(exception, value, tb); - return res; -} -#else -static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { - int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; - if (!res) { - res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); - } - return res; -} -#endif -static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - assert(PyExceptionClass_Check(exc_type)); - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; ip) { - #if PY_MAJOR_VERSION < 3 - if (t->is_unicode) { - *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - #else - if (t->is_unicode | t->is_str) { - if (t->intern) { - *t->p = PyUnicode_InternFromString(t->s); - } else if (t->encoding) { - *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); - } else { - *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); - } - } else { - *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); - } - #endif - if (!*t->p) - return -1; - if (PyObject_Hash(*t->p) == -1) - return -1; - ++t; - } - return 0; -} - -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { - return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); -} -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { - Py_ssize_t ignore; - return __Pyx_PyObject_AsStringAndSize(o, &ignore); -} -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -#if !CYTHON_PEP393_ENABLED -static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - char* defenc_c; - PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); - if (!defenc) return NULL; - defenc_c = PyBytes_AS_STRING(defenc); -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - { - char* end = defenc_c + PyBytes_GET_SIZE(defenc); - char* c; - for (c = defenc_c; c < end; c++) { - if ((unsigned char) (*c) >= 128) { - PyUnicode_AsASCIIString(o); - return NULL; - } - } 
- } -#endif - *length = PyBytes_GET_SIZE(defenc); - return defenc_c; -} -#else -static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - if (likely(PyUnicode_IS_ASCII(o))) { - *length = PyUnicode_GET_LENGTH(o); - return PyUnicode_AsUTF8(o); - } else { - PyUnicode_AsASCIIString(o); - return NULL; - } -#else - return PyUnicode_AsUTF8AndSize(o, length); -#endif -} -#endif -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT - if ( -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - __Pyx_sys_getdefaultencoding_not_ascii && -#endif - PyUnicode_Check(o)) { - return __Pyx_PyUnicode_AsStringAndSize(o, length); - } else -#endif -#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) - if (PyByteArray_Check(o)) { - *length = PyByteArray_GET_SIZE(o); - return PyByteArray_AS_STRING(o); - } else -#endif - { - char* result; - int r = PyBytes_AsStringAndSize(o, &result, length); - if (unlikely(r < 0)) { - return NULL; - } else { - return result; - } - } -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { - int retval; - if (unlikely(!x)) return -1; - retval = __Pyx_PyObject_IsTrue(x); - Py_DECREF(x); - return retval; -} -static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { -#if PY_MAJOR_VERSION >= 3 - if (PyLong_Check(result)) { - if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, - "__int__ returned non-int (type %.200s). 
" - "The ability to return an instance of a strict subclass of int " - "is deprecated, and may be removed in a future version of Python.", - Py_TYPE(result)->tp_name)) { - Py_DECREF(result); - return NULL; - } - return result; - } -#endif - PyErr_Format(PyExc_TypeError, - "__%.4s__ returned non-%.4s (type %.200s)", - type_name, type_name, Py_TYPE(result)->tp_name); - Py_DECREF(result); - return NULL; -} -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { -#if CYTHON_USE_TYPE_SLOTS - PyNumberMethods *m; -#endif - const char *name = NULL; - PyObject *res = NULL; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x) || PyLong_Check(x))) -#else - if (likely(PyLong_Check(x))) -#endif - return __Pyx_NewRef(x); -#if CYTHON_USE_TYPE_SLOTS - m = Py_TYPE(x)->tp_as_number; - #if PY_MAJOR_VERSION < 3 - if (m && m->nb_int) { - name = "int"; - res = m->nb_int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = m->nb_long(x); - } - #else - if (likely(m && m->nb_int)) { - name = "int"; - res = m->nb_int(x); - } - #endif -#else - if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { - res = PyNumber_Int(x); - } -#endif - if (likely(res)) { -#if PY_MAJOR_VERSION < 3 - if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { -#else - if (unlikely(!PyLong_CheckExact(res))) { -#endif - return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject *x; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(b))) { - if (sizeof(Py_ssize_t) >= sizeof(long)) - return PyInt_AS_LONG(b); - else - return PyInt_AsSsize_t(b); - } -#endif - if (likely(PyLong_CheckExact(b))) { - #if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)b)->ob_digit; - const Py_ssize_t size = Py_SIZE(b); - if (likely(__Pyx_sst_abs(size) <= 1)) { - 
ival = likely(size) ? digits[0] : 0; - if (size == -1) ival = -ival; - return ival; - } else { - switch (size) { - case 2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - } - } - #endif - return PyLong_AsSsize_t(b); - } - x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { - return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); -} -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { - return PyInt_FromSize_t(ival); -} - - -#endif /* Py_PYTHON_H */ From 7272afcb7d3200c011217700ed78196439515966 Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Tue, 31 Mar 2020 17:54:06 -0500 Subject: [PATCH 11/21] Apply suggestions from code review Co-Authored-By: Keith Kraus --- python/cudf/cudf/_cuda/gpu.pyx | 2 +- python/cudf/cudf/utils/gpu_utils.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/python/cudf/cudf/_cuda/gpu.pyx b/python/cudf/cudf/_cuda/gpu.pyx index 09211b58d9b..a2c5db64ab0 100644 --- a/python/cudf/cudf/_cuda/gpu.pyx +++ b/python/cudf/cudf/_cuda/gpu.pyx @@ -256,7 +256,7 @@ def getDeviceCount(): return -1 if status != 0 else count -def getDeviceAttribute(attr, device): +def getDeviceAttribute(object attr, int device): """ Returns information about the device. diff --git a/python/cudf/cudf/utils/gpu_utils.py b/python/cudf/cudf/utils/gpu_utils.py index d7712e967d0..c914aa54f45 100644 --- a/python/cudf/cudf/utils/gpu_utils.py +++ b/python/cudf/cudf/utils/gpu_utils.py @@ -26,7 +26,7 @@ def validate_setup(): # Turing 7.5 # Volta 7.x # Pascal 6.x - # Maxwell 5.x + # Maxwell 5.x # Kepler 3.x # Fermi 2.x pass @@ -39,7 +39,7 @@ def validate_setup(): cuda_runtime_version = runtimeGetVersion() - if cuda_runtime_version > 10000: + if cuda_runtime_version >= 10000: # CUDA Runtime Version Check: Runtime version is greater than 10000 pass else: @@ -92,5 +92,5 @@ def validate_setup(): else: warnings.warn( - "You donot have an NVIDIA GPU, please install one and try again" + "No NVIDIA GPU detected" ) From 7c8cb5bdb70be82f56faa104f5a4bea6a071cd0d Mon Sep 17 00:00:00 2001 From: galipremsagar Date: Tue, 31 Mar 2020 16:11:24 -0700 Subject: [PATCH 12/21] remove except + for c apis --- python/cudf/cudf/_cuda/gpu.pxd | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/python/cudf/cudf/_cuda/gpu.pxd 
b/python/cudf/cudf/_cuda/gpu.pxd index f2428eade81..8f2ac05a879 100644 --- a/python/cudf/cudf/_cuda/gpu.pxd +++ b/python/cudf/cudf/_cuda/gpu.pxd @@ -105,10 +105,9 @@ cdef extern from "cuda.h" nogil: cdef extern from "cuda_runtime_api.h" nogil: - int cudaDriverGetVersion(int* driverVersion) except + - int cudaRuntimeGetVersion(int* runtimeVersion) except + - int cudaGetDeviceCount(int* count) except + - int cudaDeviceGetAttribute(int* value, cudaDeviceAttr attr, int device) \ - except + + int cudaDriverGetVersion(int* driverVersion) + int cudaRuntimeGetVersion(int* runtimeVersion) + int cudaGetDeviceCount(int* count) + int cudaDeviceGetAttribute(int* value, cudaDeviceAttr attr, int device) ctypedef int underlying_type_attribute From 6818152626681eaada6ccfa7e26faaa25f88cc91 Mon Sep 17 00:00:00 2001 From: galipremsagar Date: Tue, 31 Mar 2020 17:22:26 -0700 Subject: [PATCH 13/21] add param types in docs --- python/cudf/cudf/_cuda/gpu.pyx | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/python/cudf/cudf/_cuda/gpu.pyx b/python/cudf/cudf/_cuda/gpu.pyx index a2c5db64ab0..22710e373f3 100644 --- a/python/cudf/cudf/_cuda/gpu.pyx +++ b/python/cudf/cudf/_cuda/gpu.pyx @@ -261,9 +261,10 @@ def getDeviceAttribute(object attr, int device): Returns information about the device. 
Parameters - attr + ---------- + attr : object (CudaDeviceAttr) Device attribute to query - device + device : int Device number to query """ From 6ac3a936fa5db96af9af590107bee0ab276599ab Mon Sep 17 00:00:00 2001 From: galipremsagar Date: Tue, 31 Mar 2020 18:14:40 -0700 Subject: [PATCH 14/21] add getDeviceProperties api --- python/cudf/cudf/_cuda/gpu.pxd | 84 +++++++++++++++++++++++++++++ python/cudf/cudf/_cuda/gpu.pyx | 10 +++- python/cudf/cudf/utils/gpu_utils.py | 14 +++-- 3 files changed, 102 insertions(+), 6 deletions(-) diff --git a/python/cudf/cudf/_cuda/gpu.pxd b/python/cudf/cudf/_cuda/gpu.pxd index 8f2ac05a879..385f2bd9876 100644 --- a/python/cudf/cudf/_cuda/gpu.pxd +++ b/python/cudf/cudf/_cuda/gpu.pxd @@ -103,11 +103,95 @@ cdef extern from "cuda.h" nogil: cudaDevAttrPageableMemoryAccessUsesHostPageTables = 100 cudaDevAttrDirectManagedMemAccessFromHost = 101 + ctypedef struct CUuuid_st: + char bytes[16] + + ctypedef CUuuid_st cudaUUID_t + + ctypedef struct cudaDeviceProp: + int ECCEnabled + int asyncEngineCount + int canMapHostMemory + int canUseHostPointerForRegisteredMem + int clockRate + int computeMode + int computePreemptionSupported + int concurrentKernels + int concurrentManagedAccess + int cooperativeLaunch + int cooperativeMultiDeviceLaunch + int deviceOverlap + int directManagedMemAccessFromHost + int globalL1CacheSupported + int hostNativeAtomicSupported + int integrated + int isMultiGpuBoard + int kernelExecTimeoutEnabled + int l2CacheSize + int localL1CacheSupported + char luid[8] + unsigned int luidDeviceNodeMask + int major + int managedMemory + int maxGridSize[3] + int maxSurface1D + int maxSurface1DLayered[2] + int maxSurface2D[2] + int maxSurface2DLayered[3] + int maxSurface3D[3] + int maxSurfaceCubemap + int maxSurfaceCubemapLayered[2] + int maxTexture1D + int maxTexture1DLayered[2] + int maxTexture1DLinear + int maxTexture1DMipmap + int maxTexture2D[2] + int maxTexture2DGather[2] + int maxTexture2DLayered[3] + int maxTexture2DLinear[3] + 
int maxTexture2DMipmap[2] + int maxTexture3D[3] + int maxTexture3DAlt[3] + int maxTextureCubemap + int maxTextureCubemapLayered[2] + int maxThreadsDim[3] + int maxThreadsPerBlock + int maxThreadsPerMultiProcessor + size_t memPitch + int memoryBusWidth + int memoryClockRate + int minor + int multiGpuBoardGroupID + int multiProcessorCount + char name[256] + int pageableMemoryAccess + int pageableMemoryAccessUsesHostPageTables + int pciBusID + int pciDeviceID + int pciDomainID + int regsPerBlock + int regsPerMultiprocessor + size_t sharedMemPerBlock + size_t sharedMemPerBlockOptin + size_t sharedMemPerMultiprocessor + int singleToDoublePrecisionPerfRatio + int streamPrioritiesSupported + size_t surfaceAlignment + int tccDriver + size_t textureAlignment + size_t texturePitchAlignment + size_t totalConstMem + size_t totalGlobalMem + int unifiedAddressing + cudaUUID_t uuid + int warpSize + cdef extern from "cuda_runtime_api.h" nogil: int cudaDriverGetVersion(int* driverVersion) int cudaRuntimeGetVersion(int* runtimeVersion) int cudaGetDeviceCount(int* count) int cudaDeviceGetAttribute(int* value, cudaDeviceAttr attr, int device) + int cudaGetDeviceProperties(cudaDeviceProp* prop, int device) ctypedef int underlying_type_attribute diff --git a/python/cudf/cudf/_cuda/gpu.pyx b/python/cudf/cudf/_cuda/gpu.pyx index 22710e373f3..c3ddeb6a4d1 100644 --- a/python/cudf/cudf/_cuda/gpu.pyx +++ b/python/cudf/cudf/_cuda/gpu.pyx @@ -5,7 +5,9 @@ from cudf._cuda.gpu cimport ( cudaRuntimeGetVersion, cudaGetDeviceCount, cudaDeviceGetAttribute, - cudaDeviceAttr + cudaDeviceAttr, + cudaGetDeviceProperties, + cudaDeviceProp ) from enum import IntEnum from cudf._cuda.gpu cimport underlying_type_attribute as c_attr @@ -271,3 +273,9 @@ def getDeviceAttribute(object attr, int device): cdef int value status = cudaDeviceGetAttribute(&value, attr, device) return -1 if status != 0 else value + + +def getDeviceProperties(int device): + cdef cudaDeviceProp prop + status = 
cudaGetDeviceProperties(&prop, device) + return None if status != 0 else prop diff --git a/python/cudf/cudf/utils/gpu_utils.py b/python/cudf/cudf/utils/gpu_utils.py index c914aa54f45..bcb1e9d3024 100644 --- a/python/cudf/cudf/utils/gpu_utils.py +++ b/python/cudf/cudf/utils/gpu_utils.py @@ -5,6 +5,7 @@ def validate_setup(): runtimeGetVersion, getDeviceAttribute, CudaDeviceAttr, + getDeviceProperties, ) import warnings @@ -31,10 +32,15 @@ def validate_setup(): # Fermi 2.x pass else: - + device_props = getDeviceProperties(0) warnings.warn( "You will need a GPU with NVIDIA Pascal™ architecture or \ - better" + better\n" + "Detected GPU 0 : " + str(device_props["name"].decode()) + "\n" + "Detected Compute Capability : " + + str(device_props["major"]) + + "." + + str(device_props["minor"]) ) cuda_runtime_version = runtimeGetVersion() @@ -91,6 +97,4 @@ def validate_setup(): else: - warnings.warn( - "No NVIDIA GPU detected" - ) + warnings.warn("No NVIDIA GPU detected") From 5171b0e4f74c59ee55906013c54f6a1b5955b264 Mon Sep 17 00:00:00 2001 From: galipremsagar Date: Tue, 31 Mar 2020 18:17:54 -0700 Subject: [PATCH 15/21] do inline skip of isort --- python/cudf/cudf/__init__.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/python/cudf/cudf/__init__.py b/python/cudf/cudf/__init__.py index c73c657cba8..c2c0ab2c97a 100644 --- a/python/cudf/cudf/__init__.py +++ b/python/cudf/cudf/__init__.py @@ -1,15 +1,13 @@ -# Copyright (c) 2018-2019, NVIDIA CORPORATION. -""" __init__.py +# Copyright (c) 2018-2020, NVIDIA CORPORATION. 
- isort:skip_file -""" - -from cudf.utils.gpu_utils import validate_setup +from cudf.utils.gpu_utils import validate_setup # isort:skip validate_setup() import cupy + import rmm + from cudf import core, datasets from cudf._version import get_versions from cudf.core import DataFrame, Index, MultiIndex, Series, from_pandas, merge From 0643e71b5212bfa7bd172dfced224fcf55b257ed Mon Sep 17 00:00:00 2001 From: galipremsagar Date: Wed, 1 Apr 2020 16:13:27 -0700 Subject: [PATCH 16/21] add error handling --- python/cudf/cudf/_cuda/gpu.pxd | 117 ++++++++++++++++++++++++++++ python/cudf/cudf/_cuda/gpu.pyx | 38 +++++++-- python/cudf/cudf/utils/gpu_utils.py | 7 +- 3 files changed, 155 insertions(+), 7 deletions(-) diff --git a/python/cudf/cudf/_cuda/gpu.pxd b/python/cudf/cudf/_cuda/gpu.pxd index 385f2bd9876..baf1ad32078 100644 --- a/python/cudf/cudf/_cuda/gpu.pxd +++ b/python/cudf/cudf/_cuda/gpu.pxd @@ -103,6 +103,120 @@ cdef extern from "cuda.h" nogil: cudaDevAttrPageableMemoryAccessUsesHostPageTables = 100 cudaDevAttrDirectManagedMemAccessFromHost = 101 + ctypedef enum cudaError: + cudaSuccess = 0 + cudaErrorInvalidValue = 1 + cudaErrorMemoryAllocation = 2 + cudaErrorInitializationError = 3 + cudaErrorCudartUnloading = 4 + cudaErrorProfilerDisabled = 5 + cudaErrorProfilerNotInitialized = 6 + cudaErrorProfilerAlreadyStarted = 7 + cudaErrorProfilerAlreadyStopped = 8 + cudaErrorInvalidConfiguration = 9 + cudaErrorInvalidPitchValue = 12 + cudaErrorInvalidSymbol = 13 + cudaErrorInvalidHostPointer = 16 + cudaErrorInvalidDevicePointer = 17 + cudaErrorInvalidTexture = 18 + cudaErrorInvalidTextureBinding = 19 + cudaErrorInvalidChannelDescriptor = 20 + cudaErrorInvalidMemcpyDirection = 21 + cudaErrorAddressOfConstant = 22 + cudaErrorTextureFetchFailed = 23 + cudaErrorTextureNotBound = 24 + cudaErrorSynchronizationError = 25 + cudaErrorInvalidFilterSetting = 26 + cudaErrorInvalidNormSetting = 27 + cudaErrorMixedDeviceExecution = 28 + cudaErrorNotYetImplemented = 31 + 
cudaErrorMemoryValueTooLarge = 32 + cudaErrorInsufficientDriver = 35 + cudaErrorInvalidSurface = 37 + cudaErrorDuplicateVariableName = 43 + cudaErrorDuplicateTextureName = 44 + cudaErrorDuplicateSurfaceName = 45 + cudaErrorDevicesUnavailable = 46 + cudaErrorIncompatibleDriverContext = 49 + cudaErrorMissingConfiguration = 52 + cudaErrorPriorLaunchFailure = 53 + cudaErrorLaunchMaxDepthExceeded = 65 + cudaErrorLaunchFileScopedTex = 66 + cudaErrorLaunchFileScopedSurf = 67 + cudaErrorSyncDepthExceeded = 68 + cudaErrorLaunchPendingCountExceeded = 69 + cudaErrorInvalidDeviceFunction = 98 + cudaErrorNoDevice = 100 + cudaErrorInvalidDevice = 101 + cudaErrorStartupFailure = 127 + cudaErrorInvalidKernelImage = 200 + cudaErrorDeviceUninitialized = 201 + cudaErrorMapBufferObjectFailed = 205 + cudaErrorUnmapBufferObjectFailed = 206 + cudaErrorArrayIsMapped = 207 + cudaErrorAlreadyMapped = 208 + cudaErrorNoKernelImageForDevice = 209 + cudaErrorAlreadyAcquired = 210 + cudaErrorNotMapped = 211 + cudaErrorNotMappedAsArray = 212 + cudaErrorNotMappedAsPointer = 213 + cudaErrorECCUncorrectable = 214 + cudaErrorUnsupportedLimit = 215 + cudaErrorDeviceAlreadyInUse = 216 + cudaErrorPeerAccessUnsupported = 217 + cudaErrorInvalidPtx = 218 + cudaErrorInvalidGraphicsContext = 219 + cudaErrorNvlinkUncorrectable = 220 + cudaErrorJitCompilerNotFound = 221 + cudaErrorInvalidSource = 300 + cudaErrorFileNotFound = 301 + cudaErrorSharedObjectSymbolNotFound = 302 + cudaErrorSharedObjectInitFailed = 303 + cudaErrorOperatingSystem = 304 + cudaErrorInvalidResourceHandle = 400 + cudaErrorIllegalState = 401 + cudaErrorSymbolNotFound = 500 + cudaErrorNotReady = 600 + cudaErrorIllegalAddress = 700 + cudaErrorLaunchOutOfResources = 701 + cudaErrorLaunchTimeout = 702 + cudaErrorLaunchIncompatibleTexturing = 703 + cudaErrorPeerAccessAlreadyEnabled = 704 + cudaErrorPeerAccessNotEnabled = 705 + cudaErrorSetOnActiveProcess = 708 + cudaErrorContextIsDestroyed = 709 + cudaErrorAssert = 710 + cudaErrorTooManyPeers = 
711 + cudaErrorHostMemoryAlreadyRegistered = 712 + cudaErrorHostMemoryNotRegistered = 713 + cudaErrorHardwareStackError = 714 + cudaErrorIllegalInstruction = 715 + cudaErrorMisalignedAddress = 716 + cudaErrorInvalidAddressSpace = 717 + cudaErrorInvalidPc = 718 + cudaErrorLaunchFailure = 719 + cudaErrorCooperativeLaunchTooLarge = 720 + cudaErrorNotPermitted = 800 + cudaErrorNotSupported = 801 + cudaErrorSystemNotReady = 802 + cudaErrorSystemDriverMismatch = 803 + cudaErrorCompatNotSupportedOnDevice = 804 + cudaErrorStreamCaptureUnsupported = 900 + cudaErrorStreamCaptureInvalidated = 901 + cudaErrorStreamCaptureMerge = 902 + cudaErrorStreamCaptureUnmatched = 903 + cudaErrorStreamCaptureUnjoined = 904 + cudaErrorStreamCaptureIsolation = 905 + cudaErrorStreamCaptureImplicit = 906 + cudaErrorCapturedEvent = 907 + cudaErrorStreamCaptureWrongThread = 908 + cudaErrorTimeout = 909 + cudaErrorGraphExecUpdateFailure = 910 + cudaErrorUnknown = 999 + cudaErrorApiFailureBase = 10000 + + ctypedef cudaError cudaError_t + ctypedef struct CUuuid_st: char bytes[16] @@ -194,4 +308,7 @@ cdef extern from "cuda_runtime_api.h" nogil: int cudaDeviceGetAttribute(int* value, cudaDeviceAttr attr, int device) int cudaGetDeviceProperties(cudaDeviceProp* prop, int device) + const char* cudaGetErrorName(cudaError_t error) + const char* cudaGetErrorString(cudaError_t error) + ctypedef int underlying_type_attribute diff --git a/python/cudf/cudf/_cuda/gpu.pyx b/python/cudf/cudf/_cuda/gpu.pyx index c3ddeb6a4d1..5820cfc5b1e 100644 --- a/python/cudf/cudf/_cuda/gpu.pyx +++ b/python/cudf/cudf/_cuda/gpu.pyx @@ -7,12 +7,28 @@ from cudf._cuda.gpu cimport ( cudaDeviceGetAttribute, cudaDeviceAttr, cudaGetDeviceProperties, - cudaDeviceProp + cudaDeviceProp, + cudaGetErrorName, + cudaGetErrorString, + cudaError_t ) from enum import IntEnum from cudf._cuda.gpu cimport underlying_type_attribute as c_attr +class CUDARuntimeError(RuntimeError): + + def __init__(self, status): + self.status = status + cdef bytes 
name = cudaGetErrorName(status) + cdef bytes msg = cudaGetErrorString(status) + super(CUDARuntimeError, self).__init__( + '%s: %s' % (name.decode(), msg.decode())) + + def __reduce__(self): + return (type(self), (self.status,)) + + class CudaDeviceAttr(IntEnum): cudaDevAttrMaxThreadsPerBlock = \ cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock @@ -228,7 +244,9 @@ def driverGetVersion(): """ cdef int version status = cudaDriverGetVersion(&version) - return -1 if status != 0 else version + if status != 0: + raise CUDARuntimeError(status) + return version def runtimeGetVersion(): @@ -242,7 +260,9 @@ def runtimeGetVersion(): cdef int version status = cudaRuntimeGetVersion(&version) - return -1 if status != 0 else version + if status != 0: + raise CUDARuntimeError(status) + return version def getDeviceCount(): @@ -255,7 +275,9 @@ def getDeviceCount(): cdef int count status = cudaGetDeviceCount(&count) - return -1 if status != 0 else count + if status != 0: + raise CUDARuntimeError(status) + return count def getDeviceAttribute(object attr, int device): @@ -272,10 +294,14 @@ def getDeviceAttribute(object attr, int device): cdef int value status = cudaDeviceGetAttribute(&value, attr, device) - return -1 if status != 0 else value + if status != 0: + raise CUDARuntimeError(status) + return value def getDeviceProperties(int device): cdef cudaDeviceProp prop status = cudaGetDeviceProperties(&prop, device) - return None if status != 0 else prop + if status != 0: + raise CUDARuntimeError(status) + return prop diff --git a/python/cudf/cudf/utils/gpu_utils.py b/python/cudf/cudf/utils/gpu_utils.py index bcb1e9d3024..b39d13ee86b 100644 --- a/python/cudf/cudf/utils/gpu_utils.py +++ b/python/cudf/cudf/utils/gpu_utils.py @@ -6,10 +6,15 @@ def validate_setup(): getDeviceAttribute, CudaDeviceAttr, getDeviceProperties, + CUDARuntimeError, ) import warnings - gpus_count = getDeviceCount() + try: + gpus_count = getDeviceCount() + except CUDARuntimeError: + # If there is no GPU detected, set 
`gpus_count` to -1 + gpus_count = -1 if gpus_count > 0: # Cupy throws RunTimeException to get GPU count, From 3a7ab8cc3e048f0770c352879b95c0679d377275 Mon Sep 17 00:00:00 2001 From: galipremsagar Date: Wed, 1 Apr 2020 16:22:11 -0700 Subject: [PATCH 17/21] add docs --- python/cudf/cudf/_cuda/gpu.pyx | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/python/cudf/cudf/_cuda/gpu.pyx b/python/cudf/cudf/_cuda/gpu.pyx index 5820cfc5b1e..3bb9a91fdaf 100644 --- a/python/cudf/cudf/_cuda/gpu.pyx +++ b/python/cudf/cudf/_cuda/gpu.pyx @@ -240,7 +240,8 @@ def driverGetVersion(): CUDA 9.2 would be represented by 9020. If no driver is installed, then 0 is returned as the driver version. - This function returns -1 if driver version is NULL. + This function automatically raises CUDARuntimeError with error message + and status code. """ cdef int version status = cudaDriverGetVersion(&version) @@ -255,7 +256,8 @@ def runtimeGetVersion(): The version is returned as (1000 major + 10 minor). For example, CUDA 9.2 would be represented by 9020. - This function returns -1 if runtime version is NULL. + This function automatically raises CUDARuntimeError with error message + and status code. """ cdef int version @@ -270,7 +272,8 @@ def getDeviceCount(): Returns the number of devices with compute capability greater or equal to 2.0 that are available for execution. - This function returns -1 if NULL device pointer is assigned. + This function automatically raises CUDARuntimeError with error message + and status code. """ cdef int count @@ -290,6 +293,9 @@ def getDeviceAttribute(object attr, int device): Device attribute to query device : int Device number to query + + This function automatically raises CUDARuntimeError with error message + and status code. """ cdef int value @@ -300,6 +306,18 @@ def getDeviceAttribute(object attr, int device): def getDeviceProperties(int device): + """ + Returns information about the compute-device. 
+ + Parameters + ---------- + device : int + Device number to query + + This function automatically raises CUDARuntimeError with error message + and status code. + """ + cdef cudaDeviceProp prop status = cudaGetDeviceProperties(&prop, device) if status != 0: From 012a6debaed18aa9ce87c87b3c17d2eae6072542 Mon Sep 17 00:00:00 2001 From: galipremsagar Date: Wed, 1 Apr 2020 16:46:15 -0700 Subject: [PATCH 18/21] print the detected cuda runtime version --- python/cudf/cudf/utils/gpu_utils.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/python/cudf/cudf/utils/gpu_utils.py b/python/cudf/cudf/utils/gpu_utils.py index b39d13ee86b..40ae84176bd 100644 --- a/python/cudf/cudf/utils/gpu_utils.py +++ b/python/cudf/cudf/utils/gpu_utils.py @@ -56,8 +56,13 @@ def validate_setup(): else: from cudf.errors import UnSupportedCUDAError + minor_version = cuda_runtime_version % 100 + major_version = (cuda_runtime_version - minor_version) // 1000 raise UnSupportedCUDAError( - "Please update your CUDA Runtime to 10.0 or above" + "Detected CUDA Runtime version is {0}.{1}" + "Please update your CUDA Runtime to 10.0 or above".format( + major_version, str(minor_version)[0] + ) ) cuda_driver_supported_rt_version = driverGetVersion() From 6032e64797f275fcca2c15cd0e35d3a41fb34b20 Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Wed, 1 Apr 2020 21:00:02 -0500 Subject: [PATCH 19/21] Apply suggestions from code review Co-Authored-By: Keith Kraus --- python/cudf/cudf/utils/gpu_utils.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/python/cudf/cudf/utils/gpu_utils.py b/python/cudf/cudf/utils/gpu_utils.py index 40ae84176bd..90cb65edba9 100644 --- a/python/cudf/cudf/utils/gpu_utils.py +++ b/python/cudf/cudf/utils/gpu_utils.py @@ -39,10 +39,9 @@ def validate_setup(): else: device_props = getDeviceProperties(0) warnings.warn( - "You will need a GPU with NVIDIA Pascal™ architecture or \ - better\n" - "Detected GPU 0 : " + 
str(device_props["name"].decode()) + "\n" - "Detected Compute Capability : " + "You will need a GPU with NVIDIA Pascal™ or newer architecture\n" + "Detected GPU 0: " + str(device_props["name"].decode()) + "\n" + "Detected Compute Capability: " + str(device_props["major"]) + "." + str(device_props["minor"]) From b0bb1136ff7a4a094566d33b23fc54a5b90cac78 Mon Sep 17 00:00:00 2001 From: galipremsagar Date: Wed, 1 Apr 2020 19:16:50 -0700 Subject: [PATCH 20/21] fetching only the properties required instead of querying all properties of the device --- python/cudf/cudf/_cuda/gpu.pxd | 1 + python/cudf/cudf/_cuda/gpu.pyx | 24 +++++++++++++++++++++++- python/cudf/cudf/utils/gpu_utils.py | 13 ++++++++----- 3 files changed, 32 insertions(+), 6 deletions(-) diff --git a/python/cudf/cudf/_cuda/gpu.pxd b/python/cudf/cudf/_cuda/gpu.pxd index baf1ad32078..c39545ecc20 100644 --- a/python/cudf/cudf/_cuda/gpu.pxd +++ b/python/cudf/cudf/_cuda/gpu.pxd @@ -310,5 +310,6 @@ cdef extern from "cuda_runtime_api.h" nogil: const char* cudaGetErrorName(cudaError_t error) const char* cudaGetErrorString(cudaError_t error) + int cuDeviceGetName(char* name, int len, int device) ctypedef int underlying_type_attribute diff --git a/python/cudf/cudf/_cuda/gpu.pyx b/python/cudf/cudf/_cuda/gpu.pyx index 3bb9a91fdaf..b5741952f20 100644 --- a/python/cudf/cudf/_cuda/gpu.pyx +++ b/python/cudf/cudf/_cuda/gpu.pyx @@ -10,9 +10,11 @@ from cudf._cuda.gpu cimport ( cudaDeviceProp, cudaGetErrorName, cudaGetErrorString, - cudaError_t + cudaError_t, + cuDeviceGetName ) from enum import IntEnum +from libc.stdlib cimport malloc from cudf._cuda.gpu cimport underlying_type_attribute as c_attr @@ -323,3 +325,23 @@ def getDeviceProperties(int device): if status != 0: raise CUDARuntimeError(status) return prop + + +def deviceGetName(int device): + """ + Returns an identifier string for the device.
+ + Parameters + ---------- + device : int + Device number to query + + This function automatically raises CUDARuntimeError with error message + and status code. + """ + + cdef char* device_name = malloc(256 * sizeof(char)) + status = cuDeviceGetName(device_name, 256, device) + if status != 0: + raise CUDARuntimeError(status) + return device_name diff --git a/python/cudf/cudf/utils/gpu_utils.py b/python/cudf/cudf/utils/gpu_utils.py index 40ae84176bd..7afb1a8a0f7 100644 --- a/python/cudf/cudf/utils/gpu_utils.py +++ b/python/cudf/cudf/utils/gpu_utils.py @@ -5,8 +5,8 @@ def validate_setup(): runtimeGetVersion, getDeviceAttribute, CudaDeviceAttr, - getDeviceProperties, CUDARuntimeError, + deviceGetName, ) import warnings @@ -37,15 +37,18 @@ def validate_setup(): # Fermi 2.x pass else: - device_props = getDeviceProperties(0) + device_name = deviceGetName(0) + minor_version = getDeviceAttribute( + CudaDeviceAttr.cudaDevAttrComputeCapabilityMinor, 0 + ) warnings.warn( "You will need a GPU with NVIDIA Pascal™ architecture or \ better\n" - "Detected GPU 0 : " + str(device_props["name"].decode()) + "\n" + "Detected GPU 0 : " + str(device_name.decode()) + "\n" "Detected Compute Capability : " - + str(device_props["major"]) + + str(major_version) + "." 
- + str(device_props["minor"]) + + str(minor_version) ) cuda_runtime_version = runtimeGetVersion() From 2e23210a0bff3155ce1fff94c2a5012e34cb0730 Mon Sep 17 00:00:00 2001 From: GALI PREM SAGAR Date: Wed, 1 Apr 2020 21:26:03 -0500 Subject: [PATCH 21/21] Update python/cudf/cudf/_cuda/gpu.pxd --- python/cudf/cudf/_cuda/gpu.pxd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/cudf/cudf/_cuda/gpu.pxd b/python/cudf/cudf/_cuda/gpu.pxd index c39545ecc20..94b56601914 100644 --- a/python/cudf/cudf/_cuda/gpu.pxd +++ b/python/cudf/cudf/_cuda/gpu.pxd @@ -310,6 +310,6 @@ cdef extern from "cuda_runtime_api.h" nogil: const char* cudaGetErrorName(cudaError_t error) const char* cudaGetErrorString(cudaError_t error) - int cuDeviceGetName(char* name, int len, int device) + int cuDeviceGetName(char* name, int length, int device) ctypedef int underlying_type_attribute